# ---------------------------------------------------------------------------
# Hugging Face Accelerate: the interactive `accelerate config` command.
# ---------------------------------------------------------------------------
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    # Ask which compute environment is in use, then hand off to the matching
    # question flow (SageMaker vs. local/cluster).
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    # Either attach a `config` subcommand to an existing parser collection or
    # build a standalone parser.
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
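

# Typical invocations, once the `accelerate` CLI wires in the subparser built
# above (the explicit path in the second form is only an example; without it,
# the cache location described in the `--config_file` help text is used):
#
#   accelerate config
#   accelerate config --config_file path/to/default_config.yaml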

# ---------------------------------------------------------------------------
# Project Euler problem 11: greatest product of four adjacent numbers (in any
# direction) in a 20x20 grid read from grid.txt.
# ---------------------------------------------------------------------------
import os


def solution():
    """Return the greatest product of four adjacent grid numbers."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal, down-right
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal, down-left
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())

# ---------------------------------------------------------------------------
# Tokenization tests for the M2M100 tokenizer (Hugging Face Transformers).
# ---------------------------------------------------------------------------
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128_022
FR_CODE = 128_028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_and_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
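

# A short sketch of the translation workflow these tests exercise, following
# the documented M2M100 usage pattern (kept as comments because it downloads
# the real checkpoint):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   inputs = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
#   # model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))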

# ---------------------------------------------------------------------------
# Pipeline tests for Stable Diffusion Attend-and-Excite (Hugging Face Diffusers).
# ---------------------------------------------------------------------------
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, for which some
    # ops have no deterministic implementation, so determinism is switched off
    # for this test class and restored afterwards.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # Determinism is switched off here as well (see the fast tests above).
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1

# ---------------------------------------------------------------------------
# ffmpeg-based audio decoding and microphone-streaming helpers
# (Hugging Face Transformers pipeline utilities).
# ---------------------------------------------------------------------------
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio byte payload to a float32 waveform through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Stream raw microphone bytes through ffmpeg in fixed-size chunks."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yield overlapping microphone chunks as numpy arrays, with stride info."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Read raw bytes from `iterator` and yield chunks of `chunk_len` bytes, with
    `stride` (left, right) overlap between consecutive chunks. With `stream`,
    partial results are yielded before a full `chunk_len` is available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run the ffmpeg command and yield its stdout in `buflen`-byte reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
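

# A minimal, self-contained illustration (not part of the original module) of
# how `chunk_bytes_iter` produces overlapping chunks; the fake byte stream and
# the sizes below are invented purely for this demo.
if __name__ == "__main__":

    def _fake_stream():
        yield b"0123456789"
        yield b"abcdefghij"

    for _chunk in chunk_bytes_iter(_fake_stream(), chunk_len=8, stride=(2, 2)):
        # Each item carries up to `chunk_len` raw bytes plus the (left, right)
        # stride overlap that a downstream consumer is expected to trim.
        print(_chunk["raw"], _chunk["stride"])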

# ---------------------------------------------------------------------------
# Model tests for ESM (Hugging Face Transformers).
# ---------------------------------------------------------------------------
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        # The position ids should be masked with the embedding object's padding index.
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        # As above, but starting from inputs_embeds rather than input_ids.
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

# ---------------------------------------------------------------------------
# Tokenization tests for ALBERT (Hugging Face Transformers).
# ---------------------------------------------------------------------------
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )

# ---------------------------------------------------------------------------
# TensorFlow activation functions (Hugging Face Transformers).
# ---------------------------------------------------------------------------
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit, exact (erf-based) formulation."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """Smoother, tanh-approximated GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis` and gate one half."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
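

# A tiny illustrative check (not part of the original module): fetch an
# activation by name and apply it to a small tensor.
if __name__ == "__main__":
    gelu_fn = get_tf_activation("gelu")
    print(gelu_fn(tf.constant([-1.0, 0.0, 1.0])).numpy())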

# ---------------------------------------------------------------------------
# Kosaraju's algorithm: strongly connected components of a directed graph.
# ---------------------------------------------------------------------------
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search returning vertices in order of completion time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph, collecting one component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components of `graph`."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # Build the reversed graph.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # First pass: order vertices by DFS completion time.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    # Second pass: sweep the reversed graph in reverse completion order.
    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
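

# Illustrative usage on the module's own test graphs. In test_graph_2 the
# cycle 0 -> 1 -> 2 -> 0 and the cycle 3 -> 4 -> 5 -> 3 are the two strongly
# connected components, so the expected output is [[0, 1, 2], [3, 4, 5]]
# (up to ordering within and between components).
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))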

# ---------------------------------------------------------------------------
# Pipeline tests for AudioDiffusionPipeline (Hugging Face Diffusers).
# ---------------------------------------------------------------------------
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
lowerCamelCase : List[str] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
np.random.seed(0 )
lowerCamelCase : List[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase : List[str] = torch.Generator(device=__a ).manual_seed(42 )
lowerCamelCase : Optional[int] = pipe(raw_audio=__a , generator=__a , start_step=5 , steps=10 )
lowerCamelCase : int = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase : Optional[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase : str = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase : List[str] = self.dummy_unet_condition
lowerCamelCase : Union[str, Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__a , mel=__a , scheduler=__a )
lowerCamelCase : Union[str, Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
np.random.seed(0 )
lowerCamelCase : Tuple = torch.rand((1, 1, 10) )
lowerCamelCase : Any = pipe(generator=__a , encoding=__a )
lowerCamelCase : int = output.images[0]
lowerCamelCase : str = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase : List[str] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Optional[int] )-> Optional[Any]:
lowerCamelCase : Tuple = torch_device
lowerCamelCase : List[str] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
lowerCamelCase : List[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(42 )
lowerCamelCase : int = pipe(generator=__a )
lowerCamelCase : int = output.audios[0]
lowerCamelCase : int = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase : Optional[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase : List[str] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
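# --- illustrative sketch (not part of the sample above) ---
# Hedged usage of the pipeline exercised by these tests (real diffusers API;
# the checkpoint name is taken from the test and downloads on first use):
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
out = pipe(generator=torch.Generator(device=pipe.device).manual_seed(42))
audio, image = out.audios[0], out.images[0]  # raw waveform and its mel-spectrogram image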
| 708
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : List[Any] ='''time_series_transformer'''
snake_case__ : List[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
# time series specific configuration
lowerCamelCase : str = prediction_length
lowerCamelCase : Optional[Any] = context_length or prediction_length
lowerCamelCase : Tuple = distribution_output
lowerCamelCase : Any = loss
lowerCamelCase : List[Any] = input_size
lowerCamelCase : int = num_time_features
lowerCamelCase : Dict = lags_sequence
lowerCamelCase : Optional[int] = scaling
lowerCamelCase : int = num_dynamic_real_features
lowerCamelCase : Tuple = num_static_real_features
lowerCamelCase : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : int = cardinality
else:
lowerCamelCase : Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__a ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowerCamelCase : str = embedding_dimension
else:
lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase : Any = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
lowerCamelCase : List[str] = d_model
lowerCamelCase : Tuple = encoder_attention_heads
lowerCamelCase : Optional[int] = decoder_attention_heads
lowerCamelCase : Union[str, Any] = encoder_ffn_dim
lowerCamelCase : str = decoder_ffn_dim
lowerCamelCase : str = encoder_layers
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : Optional[int] = encoder_layerdrop
lowerCamelCase : int = decoder_layerdrop
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : Optional[Any] = init_std
lowerCamelCase : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=__a , **__a )
@property
def a__ ( self: int )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
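# --- illustrative sketch (not part of the sample above) ---
# Minimal configuration of the model above (real transformers API; the values
# are illustrative, and `feature_size` is assumed to be derived as in __init__):
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,                 # forecast horizon
    context_length=48,                    # conditioning window (defaults to prediction_length)
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],  # lagged copies of the target fed to the model
    num_time_features=2,                  # e.g. age + month-of-year
)
print(config.feature_size)  # input_size * len(lags_sequence) + the extra features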
| 42
| 0
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : int = 4000000 ) -> int:
lowerCamelCase : List[Any] = [0, 1]
lowerCamelCase : Optional[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase : Any = 0
for j in range(len(UpperCamelCase__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
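# --- illustrative sketch (not part of the sample above) ---
# Equivalent readable version of the sum above: walk Fibonacci pairs directly
# and accumulate the even terms that do not exceed the limit.
def even_fib_sum(limit: int = 4_000_000) -> int:
    total, a, b = 0, 0, 1
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total

assert even_fib_sum(10) == 10  # 2 + 8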
| 709
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]:
lowerCamelCase : int = 1
lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase : Any = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase__ )
# put each buckets' contents into list_of_ints
lowerCamelCase : Dict = 0
for b in range(UpperCamelCase__ ):
for i in buckets[b]:
lowerCamelCase : List[str] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
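# --- illustrative sketch (not part of the sample above) ---
# Readable version of the same LSD radix sort (RADIX above is the bucket count;
# integer `//` avoids the float-rounding risk of `int(i / placement)`):
def radix_sort(values: list[int]) -> list[int]:
    place = 1
    while values and place <= max(values):
        buckets: list[list[int]] = [[] for _ in range(10)]
        for v in values:
            buckets[(v // place) % 10].append(v)
        values = [v for bucket in buckets for v in bucket]
        place *= 10
    return values

assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]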
| 42
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any ) -> Tuple:
# Construct model
if openai_config_file == "":
lowerCamelCase : Optional[Any] = OpenAIGPTConfig()
else:
lowerCamelCase : List[Any] = OpenAIGPTConfig.from_json_file(UpperCamelCase__ )
lowerCamelCase : Dict = OpenAIGPTModel(UpperCamelCase__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
lowerCamelCase : List[str] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__lowerCamelCase :str = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 710
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Dict = np.asarray(weights[0] )
lowerCamelCase : List[Any] = np.asarray(weights[1] )
lowerCamelCase : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
# set torch weights for 1-to-1 comparison
lowerCamelCase : Tuple = np.asarray(weights[0] )
lowerCamelCase : Any = np.asarray(weights[1] )
lowerCamelCase : List[Any] = np.asarray(weights[2] )
lowerCamelCase : List[str] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
# layernorm 1
lowerCamelCase : str = weights[0][0][0]
lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
lowerCamelCase : List[Any] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
# intermediate weighs
lowerCamelCase : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
lowerCamelCase : Dict = intermediate_weights[2]
# layernorm 2
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
# reformer model
lowerCamelCase : List[Any] = torch_model.reformer
# word embeds
lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
lowerCamelCase : int = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
lowerCamelCase : Any = np.asarray(weights[7][0] )
lowerCamelCase : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
# Initialise PyTorch model
lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCamelCase :Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 711
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Dict )-> Dict:
super().__init__()
lowerCamelCase : Tuple = nn.Linear(3 , 4 )
lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple:
return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
"""simple docstring"""
def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]:
return output + 1
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Dict = ModelHook()
add_hook_to_module(__a , __a )
self.assertEqual(test_model._hf_hook , __a )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: int )-> str:
lowerCamelCase : List[str] = ModelForTest()
lowerCamelCase : Union[str, Any] = ModelHook()
add_hook_to_module(__a , __a )
add_hook_to_module(__a , __a , append=__a )
self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__a , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__a )
self.assertFalse(hasattr(__a , """_hf_hook""" ) )
self.assertFalse(hasattr(__a , """_old_forward""" ) )
def a__ ( self: List[Any] )-> List[str]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Union[str, Any] = test_model(x + 1 )
lowerCamelCase : Optional[int] = test_model(x + 2 )
lowerCamelCase : List[Any] = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[int] = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : Dict = PreForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
assert torch.allclose(__a , __a , atol=1e-5 )
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : str = ModelForTest()
lowerCamelCase : List[str] = torch.randn(2 , 3 )
lowerCamelCase : int = test_model(__a )
lowerCamelCase : Dict = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Tuple = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCamelCase : str = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : Optional[Any] = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
assert torch.allclose(__a , output + 2 , atol=1e-5 )
def a__ ( self: int )-> Dict:
lowerCamelCase : List[Any] = ModelForTest()
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : List[str] = test_model(__a )
lowerCamelCase : Any = PostForwardHook()
add_hook_to_module(__a , __a )
lowerCamelCase : str = test_model(__a )
self.assertTrue(torch.allclose(__a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = test_model(__a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCamelCase : str = torch.randn(2 , 3 )
lowerCamelCase : Dict = model(__a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
lowerCamelCase : str = model(__a )
self.assertEqual(output.device , torch.device(0 ) )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
lowerCamelCase : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : int = torch.randn(2 , 3 )
lowerCamelCase : Optional[int] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__a , execution_device=__a , offload=__a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : Optional[Any] = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def a__ ( self: Optional[Any] )-> List[Any]:
lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCamelCase : List[Any] = torch.device(__a )
self.assertEqual(model.batchnorm.running_mean.device , __a )
lowerCamelCase : Dict = torch.randn(2 , 3 )
lowerCamelCase : int = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
lowerCamelCase : Tuple = torch.randn(2 , 3 )
lowerCamelCase : Any = model(__a )
self.assertEqual(output.device , __a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__a )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
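# --- illustrative sketch (not part of the sample above) ---
# Minimal usage of the hook API exercised by these tests (real accelerate API;
# the hook class name here is illustrative):
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Shift the first positional input by +1 before the wrapped forward runs.
        return (args[0] + 1,) + args[1:], kwargs

layer = nn.Linear(3, 3)
add_hook_to_module(layer, AddOnePreHook())  # wraps layer.forward in place
y = layer(torch.zeros(1, 3))                # the hook sees zeros, forwards ones
remove_hook_from_module(layer)              # restores the original forward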
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float:
lowerCamelCase : Dict = u
for i in range(1 , UpperCamelCase__ ):
lowerCamelCase : List[str] = temp * (u - i)
return temp
def snake_case ( ) -> None:
lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) )
lowerCamelCase : list[list[float]] = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 0
print("""enter the values of parameters in a list: """ )
lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(UpperCamelCase__ ):
lowerCamelCase : int = float(input() )
lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) )
lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1]
lowerCamelCase : Any = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
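# --- illustrative sketch (not part of the sample above) ---
# Self-contained version of the same Newton forward-difference interpolation on
# fixed, evenly spaced data (no interactive input); `table` holds the
# forward-difference table built column by column:
import math

def newton_forward(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    table = [[v] for v in y0]
    for i in range(1, n):  # build forward differences
        for j in range(n - i):
            table[j].append(table[j + 1][i - 1] - table[j][i - 1])
    u = (value - x[0]) / (x[1] - x[0])
    total, term = table[0][0], 1.0
    for i in range(1, n):
        term *= u - (i - 1)  # accumulates u(u-1)...(u-i+1)
        total += term * table[0][i] / math.factorial(i)
    return total

# interpolating 2**x from samples at x = 0..3:
assert abs(newton_forward([0, 1, 2, 3], [1, 2, 4, 8], 1.5) - 2.8125) < 1e-9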
| 712
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
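# --- illustrative sketch (not part of the sample above) ---
# The _LazyModule above defers heavy imports until first attribute access. The
# same idea with a PEP 562 module-level __getattr__, meant for a package
# __init__.py (the submodule path is hypothetical):
import importlib

_LAZY = {"EncodecModel": ".modeling_encodec"}

def __getattr__(name):
    # Import the owning submodule only when the attribute is first requested.
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")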
| 42
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class A__ ( datasets.BuilderConfig):
"""simple docstring"""
snake_case__ : Optional[datasets.Features] =None
class A__ ( datasets.ArrowBasedBuilder):
"""simple docstring"""
snake_case__ : Optional[int] =PandasConfig
def a__ ( self: Tuple )-> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def a__ ( self: Optional[int] , __a: Optional[int] )-> str:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__a , (str, list, tuple) ):
lowerCamelCase : Any = data_files
if isinstance(__a , __a ):
lowerCamelCase : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase : Any = [dl_manager.iter_files(__a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
lowerCamelCase : List[Any] = []
for split_name, files in data_files.items():
if isinstance(__a , __a ):
lowerCamelCase : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase : Dict = [dl_manager.iter_files(__a ) for file in files]
splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"""files""": files} ) )
return splits
def a__ ( self: Optional[Any] , __a: pa.Table )-> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase : Dict = table_cast(__a , self.config.features.arrow_schema )
return pa_table
def a__ ( self: List[str] , __a: Any )-> Dict:
for i, file in enumerate(itertools.chain.from_iterable(__a ) ):
with open(__a , """rb""" ) as f:
lowerCamelCase : Union[str, Any] = pa.Table.from_pandas(pd.read_pickle(__a ) )
yield i, self._cast_table(__a )
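# --- illustrative sketch (not part of the sample above) ---
# Core of `_generate_tables` in isolation: a pickled DataFrame round-trips into
# an Arrow table (real pandas/pyarrow APIs; the path is illustrative):
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
df.to_pickle("/tmp/sample.pkl")
table = pa.Table.from_pandas(pd.read_pickle("/tmp/sample.pkl"))
assert table.num_rows == 2 and table.column_names == ["a", "b"]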
| 713
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 0
|
"""simple docstring"""
import json
import sys
def format_json_to_md(results_json: str, output_md_file: str) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown table."""
    with open(results_json, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
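# Illustrative input/output (hypothetical numbers). Given a results file
#   {"benchmarks/b1.json": {"load_time": {"new": 1.25, "old": 1.5, "diff": -0.25}}}
# the function writes Markdown along the lines of:
#   <details>
#   <summary>Show updated benchmarks!</summary>
#
#   ### Benchmark: b1.json
#   | metric | load_time |
#   |--------|---|
#   | new / old (diff) | 1.250000 / 1.500000 (-0.250000) |
#   </details>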
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
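# Minimal usage sketch (RealmConfig and RealmEmbedder are real `transformers`
# classes; the keyword values below are purely illustrative, not recommendations):
#   from transformers import RealmConfig, RealmEmbedder
#   config = RealmConfig(retriever_proj_size=128, num_candidates=8)
#   embedder = RealmEmbedder(config)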
| 42
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : torch.FloatTensor
snake_case__ : torch.FloatTensor
snake_case__ : Optional[torch.FloatTensor] =None
class A__ ( __lowercase , __lowercase):
"""simple docstring"""
snake_case__ : Any =2
@register_to_config
def __init__( self: Any , __a: float = 0.02 , __a: float = 100 , __a: float = 1.0_07 , __a: float = 80 , __a: float = 0.05 , __a: float = 50 , )-> Optional[int]:
# standard deviation of the initial noise distribution
lowerCamelCase : Tuple = sigma_max
# setable values
lowerCamelCase : int = None
lowerCamelCase : np.IntTensor = None
lowerCamelCase : torch.FloatTensor = None # sigma(t_i)
def a__ ( self: List[str] , __a: torch.FloatTensor , __a: Optional[int] = None )-> torch.FloatTensor:
return sample
def a__ ( self: Tuple , __a: int , __a: Union[str, torch.device] = None )-> Any:
lowerCamelCase : Optional[Any] = num_inference_steps
lowerCamelCase : Union[str, Any] = np.arange(0 , self.num_inference_steps )[::-1].copy()
lowerCamelCase : Dict = torch.from_numpy(__a ).to(__a )
lowerCamelCase : int = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowerCamelCase : List[Any] = torch.tensor(__a , dtype=torch.floataa , device=__a )
def a__ ( self: int , __a: torch.FloatTensor , __a: float , __a: Optional[torch.Generator] = None )-> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Optional[Any] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : str = self.config.s_noise * randn_tensor(sample.shape , generator=__a ).to(sample.device )
lowerCamelCase : str = sigma + gamma * sigma
lowerCamelCase : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def a__ ( self: Optional[Any] , __a: torch.FloatTensor , __a: float , __a: float , __a: torch.FloatTensor , __a: bool = True , )-> Union[KarrasVeOutput, Tuple]:
lowerCamelCase : List[str] = sample_hat + sigma_hat * model_output
lowerCamelCase : Tuple = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a , derivative=__a , pred_original_sample=__a )
def a__ ( self: Dict , __a: torch.FloatTensor , __a: float , __a: float , __a: torch.FloatTensor , __a: torch.FloatTensor , __a: torch.FloatTensor , __a: bool = True , )-> Union[KarrasVeOutput, Tuple]:
lowerCamelCase : List[str] = sample_prev + sigma_prev * model_output
lowerCamelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__a , derivative=__a , pred_original_sample=__a )
def a__ ( self: Dict , __a: Dict , __a: Dict , __a: Union[str, Any] )-> str:
raise NotImplementedError()
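# Recap of the sampler implemented above (the churn/Euler scheme of Karras et
# al. 2022, Algorithm 2, which this scheduler follows):
#   sigma_hat = sigma + gamma * sigma                      (add_noise_to_input)
#   x_hat     = x + sqrt(sigma_hat**2 - sigma**2) * eps,   eps ~ N(0, s_noise**2 * I)
#   d         = (x_hat - denoised(x_hat)) / sigma_hat      (step: Euler slope)
#   x_prev    = x_hat + (sigma_prev - sigma_hat) * d
# step_correct then re-evaluates the slope at (x_prev, sigma_prev) and averages
# it with d, a Heun-style second-order correction.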
| 715
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
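# Minimal usage sketch (GLPNConfig and GLPNForDepthEstimation are real
# `transformers` classes; the values shown are illustrative):
#   from transformers import GLPNConfig, GLPNForDepthEstimation
#   config = GLPNConfig(decoder_hidden_size=64, max_depth=10)
#   model = GLPNForDepthEstimation(config)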
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase :str = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :str = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
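# With this pattern `import transformers.models.resnet` stays cheap: the
# torch/TF/flax symbols listed in _import_structure are only imported when one
# of them is first accessed, because _LazyModule resolves attributes on demand.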
| 716
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the coefficient term of the
    Newton forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
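# Worked example (hypothetical data): x = [0, 1, 2, 3], y = [1, 2, 4, 8],
# interpolating at value = 1.5, so u = (1.5 - 0) / (1 - 0) = 1.5.
# Forward differences: Δy0 = 1, Δ²y0 = 1, Δ³y0 = 1, and the formula gives
#   f(1.5) ≈ 1 + 1.5*1 + (1.5*0.5/2!)*1 + (1.5*0.5*(-0.5)/3!)*1 = 2.8125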
| 42
| 0
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    """A minimal two-cluster self-organizing map (Kohonen network)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector with the smaller squared
        Euclidean distance to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the closer (smaller-distance) vector
        return 0 if d0 < d1 else 1

    def update(
        self,
        weights: list[list[int | float]],
        sample: list[int],
        j: int,
        alpha: float,
    ) -> list[list[int | float]]:
        """Move every component of the winning weight vector j a step of size
        alpha towards the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
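# Note: alpha stays fixed across the 3 epochs above; classic Kohonen training
# would decay the learning rate per epoch (e.g. alpha = alpha0 * (1 - epoch / epochs))
# so the weights settle instead of chasing the most recent samples.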
| 717
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
snake_case__ : Tuple =10
def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
lowerCamelCase : int = {
"""num_train_timesteps""": 1_100,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__a )
return config
def a__ ( self: Union[str, Any] )-> Any:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__a )
def a__ ( self: str )-> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def a__ ( self: int )-> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def a__ ( self: List[Any] )-> List[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCamelCase : List[str] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : List[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : Optional[Any] = output.prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.00_02 ) < 1e-3
def a__ ( self: Any )-> Any:
if torch_device == "mps":
return
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase : List[Any] = self.dummy_model()
lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase : Optional[int] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[Any] = model(__a , __a )
lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
lowerCamelCase : str = output.prev_sample
lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
def a__ ( self: Optional[Any] )-> List[Any]:
if torch_device == "mps":
return
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
lowerCamelCase : Union[str, Any] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
lowerCamelCase : Optional[int] = model(__a , __a )
lowerCamelCase : int = scheduler.step(__a , __a , __a )
lowerCamelCase : int = output.prev_sample
lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
lowerCamelCase : int = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("""cpu""" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1e-2
assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 718
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
else:
lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 192:
lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 256:
lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
if hidden_sizes == 384:
lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
from_model.eval()
lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
lowerCamelCase : Tuple = OrderedDict()
lowerCamelCase : Optional[Any] = from_model.state_dict()
lowerCamelCase : str = list(from_model.state_dict().keys() )
lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = weights[og_keys[i]]
our_model.load_state_dict(UpperCamelCase__ )
lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
lowerCamelCase : Any = from_model(UpperCamelCase__ )
lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
lowerCamelCase : Dict = name
print(UpperCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowerCamelCase : Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
lowerCamelCase : List[Any] = 1000
lowerCamelCase : Dict = (1, num_labels)
lowerCamelCase : List[Any] = """huggingface/label-files"""
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase : List[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
lowerCamelCase : Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
lowerCamelCase : List[Any] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
__lowerCamelCase :List[Any] = parser.parse_args()
__lowerCamelCase :Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
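# Example invocation (the script filename is hypothetical; the flags match the
# argparse definition above):
#   python convert_levit_timm_to_pytorch.py \
#       --model_name levit-128S \
#       --pytorch_dump_folder_path ./levit-dump-folder \
#       --push_to_hub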
| 42
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowerCamelCase :Any = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__lowerCamelCase :Optional[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__lowerCamelCase :List[Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''whisper'''
snake_case__ : Optional[Any] =['''past_key_values''']
snake_case__ : Union[str, Any] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self: Optional[int] , __a: int=51_865 , __a: str=80 , __a: int=6 , __a: Union[str, Any]=4 , __a: Union[str, Any]=6 , __a: Union[str, Any]=4 , __a: str=1_536 , __a: Any=1_536 , __a: int=0.0 , __a: Optional[Any]=0.0 , __a: Dict=50_257 , __a: str=True , __a: Optional[Any]=True , __a: List[Any]="gelu" , __a: Optional[int]=256 , __a: Tuple=0.0 , __a: Union[str, Any]=0.0 , __a: Union[str, Any]=0.0 , __a: str=0.02 , __a: Tuple=False , __a: Optional[Any]=1_500 , __a: Optional[Any]=448 , __a: List[str]=50_256 , __a: int=50_256 , __a: Tuple=50_256 , __a: str=None , __a: Union[str, Any]=[220, 50_256] , __a: Optional[int]=False , __a: str=256 , __a: str=False , __a: Tuple=0.05 , __a: Union[str, Any]=10 , __a: Tuple=2 , __a: Optional[int]=0.0 , __a: List[Any]=10 , __a: Any=0 , __a: Optional[int]=7 , **__a: Tuple , )-> str:
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : List[str] = num_mel_bins
lowerCamelCase : Dict = d_model
lowerCamelCase : int = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_layers
lowerCamelCase : Optional[Any] = decoder_attention_heads
lowerCamelCase : Dict = decoder_ffn_dim
lowerCamelCase : List[Any] = encoder_ffn_dim
lowerCamelCase : str = dropout
lowerCamelCase : Tuple = attention_dropout
lowerCamelCase : Tuple = activation_dropout
lowerCamelCase : List[Any] = activation_function
lowerCamelCase : List[str] = init_std
lowerCamelCase : Union[str, Any] = encoder_layerdrop
lowerCamelCase : List[Any] = decoder_layerdrop
lowerCamelCase : List[str] = use_cache
lowerCamelCase : int = encoder_layers
lowerCamelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase : Dict = max_source_positions
lowerCamelCase : Any = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowerCamelCase : int = classifier_proj_size
lowerCamelCase : int = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase : Any = apply_spec_augment
lowerCamelCase : int = mask_time_prob
lowerCamelCase : Optional[Any] = mask_time_length
lowerCamelCase : Dict = mask_time_min_masks
lowerCamelCase : Any = mask_feature_prob
lowerCamelCase : Dict = mask_feature_length
lowerCamelCase : List[str] = mask_feature_min_masks
lowerCamelCase : int = median_filter_width
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , suppress_tokens=__a , begin_suppress_tokens=__a , **__a , )
class A__ ( __lowercase):
"""simple docstring"""
@property
def a__ ( self: Tuple )-> Mapping[str, Mapping[int, str]]:
lowerCamelCase : Tuple = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase : List[str] = {0: """batch"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="""inputs""" )
return common_inputs
def a__ ( self: int , __a: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a: int = -1 , __a: int = -1 , __a: bool = False , __a: Optional["TensorType"] = None , __a: int = 22_050 , __a: float = 5.0 , __a: int = 220 , )-> Mapping[str, Any]:
lowerCamelCase : Dict = OrderedDict()
lowerCamelCase : List[str] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__a , framework=__a , sampling_rate=__a , time_duration=__a , frequency=__a , )
lowerCamelCase : Any = encoder_inputs["""input_features"""].shape[2]
lowerCamelCase : str = encoder_sequence_length // 2 if self.use_past else seq_length
lowerCamelCase : List[str] = super().generate_dummy_inputs(
preprocessor.tokenizer , __a , __a , __a , __a )
lowerCamelCase : Union[str, Any] = encoder_inputs.pop("""input_features""" )
lowerCamelCase : List[Any] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
lowerCamelCase : List[Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def a__ ( self: List[str] )-> float:
return 1e-3
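# Minimal usage sketch (WhisperConfig and WhisperForConditionalGeneration are
# real `transformers` classes; the values shown are illustrative):
#   from transformers import WhisperConfig, WhisperForConditionalGeneration
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   model = WhisperForConditionalGeneration(config)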
| 719
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True if the decimal digits of `num` read the same in both directions."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
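# Example behaviour (illustrative):
#   is_palindrome(121)  -> True
#   is_palindrome(-121) -> False  (the leading sign breaks the symmetry)
#   is_palindrome(10)   -> False  (reversed it reads 01, i.e. 1)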
| 720
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =StableDiffusionXLImgaImgPipeline
snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def a__ ( self: List[str] )-> int:
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase : Any = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
lowerCamelCase : Dict = CLIPTextModel(__a )
lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
lowerCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith("""mps""" ):
lowerCamelCase : Dict = torch.manual_seed(__a )
else:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def a__ ( self: Optional[Any] )-> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def a__ ( self: List[str] )-> Optional[Any]:
pass
def a__ ( self: List[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
lowerCamelCase : str = sd_pipe.to(__a )
lowerCamelCase : Any = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
lowerCamelCase : Dict = self.get_dummy_inputs(__a )
lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Optional[int] = negative_prompt
lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
lowerCamelCase : List[Any] = sd_pipe(**__a )
image_slice_1 = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
output = sd_pipe(
**inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
image_slice_2 = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
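# The two passes above feed the pipeline the same conditioning twice: once as
# raw prompt strings and once as the pre-computed embeddings returned by
# encode_prompt. The final assert checks that the generated image slices agree
# to within 1e-4.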
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Dict )-> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = pipe(**__a ).images
lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 0
|
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__lowerCamelCase :str = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__lowerCamelCase :int = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def snake_case ( UpperCamelCase__ : int ) -> Any:
lowerCamelCase : Dict = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCamelCase__ )[0]
@deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" )
def snake_case ( UpperCamelCase__ : List[str] ) -> Any:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream:
lowerCamelCase : Optional[Any] = _readaa(UpperCamelCase__ )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
lowerCamelCase : List[Any] = _readaa(UpperCamelCase__ )
lowerCamelCase : int = _readaa(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = _readaa(UpperCamelCase__ )
lowerCamelCase : Dict = bytestream.read(rows * cols * num_images )
lowerCamelCase : Dict = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta )
lowerCamelCase : Optional[int] = data.reshape(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , 1 )
return data
@deprecated(UpperCamelCase__ , """Please use tf.one_hot on tensors.""" )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
lowerCamelCase : List[Any] = labels_dense.shape[0]
lowerCamelCase : Union[str, Any] = numpy.arange(UpperCamelCase__ ) * num_classes
lowerCamelCase : List[Any] = numpy.zeros((num_labels, num_classes) )
lowerCamelCase : Tuple = 1
return labels_one_hot
@deprecated(UpperCamelCase__ , """Please use tf.data to implement this functionality.""" )
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[str]=10 ) -> str:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCamelCase__ ) as bytestream:
lowerCamelCase : Dict = _readaa(UpperCamelCase__ )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
lowerCamelCase : str = _readaa(UpperCamelCase__ )
lowerCamelCase : Any = bytestream.read(UpperCamelCase__ )
lowerCamelCase : Tuple = numpy.frombuffer(UpperCamelCase__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCamelCase__ , UpperCamelCase__ )
return labels
class A__ :
"""simple docstring"""
@deprecated(
__a , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self: Tuple , __a: Tuple , __a: Dict , __a: Tuple=False , __a: int=False , __a: Optional[int]=dtypes.floataa , __a: Tuple=True , __a: int=None , )-> Any:
lowerCamelCase : Dict = random_seed.get_seed(__a )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowerCamelCase : str = dtypes.as_dtype(__a ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
lowerCamelCase : Optional[Any] = 10_000
lowerCamelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
lowerCamelCase : Union[str, Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowerCamelCase : List[str] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowerCamelCase : Optional[Any] = images.astype(numpy.floataa )
lowerCamelCase : List[str] = numpy.multiply(__a , 1.0 / 255.0 )
lowerCamelCase : Any = images
lowerCamelCase : List[str] = labels
lowerCamelCase : Dict = 0
lowerCamelCase : Union[str, Any] = 0
@property
def a__ ( self: Tuple )-> List[Any]:
return self._images
@property
def a__ ( self: Optional[int] )-> int:
return self._labels
@property
def a__ ( self: List[Any] )-> Optional[Any]:
return self._num_examples
@property
def a__ ( self: Optional[int] )-> Dict:
return self._epochs_completed
def a__ ( self: Any , __a: str , __a: Optional[int]=False , __a: str=True )-> Optional[int]:
if fake_data:
lowerCamelCase : List[str] = [1] * 784
lowerCamelCase : str = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__a )],
[fake_label for _ in range(__a )],
)
lowerCamelCase : Dict = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowerCamelCase : Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(__a )
lowerCamelCase : Any = self.images[perma]
lowerCamelCase : Optional[int] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowerCamelCase : Union[str, Any] = self._num_examples - start
lowerCamelCase : Tuple = self._images[start : self._num_examples]
lowerCamelCase : int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowerCamelCase : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(__a )
lowerCamelCase : Dict = self.images[perm]
lowerCamelCase : Optional[int] = self.labels[perm]
# Start next epoch
lowerCamelCase : str = 0
lowerCamelCase : Union[str, Any] = batch_size - rest_num_examples
lowerCamelCase : str = self._index_in_epoch
lowerCamelCase : List[Any] = self._images[start:end]
lowerCamelCase : Union[str, Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowerCamelCase : List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
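# next_batch (above) stitches mini-batches across epoch boundaries: when a
# batch would overrun the data it takes the remaining examples, reshuffles,
# tops the batch up from the new epoch, and returns exactly batch_size rows.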
@deprecated(UpperCamelCase__ , """Please write your own downloading logic.""" )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict ) -> Any:
if not gfile.Exists(UpperCamelCase__ ):
gfile.MakeDirs(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if not gfile.Exists(UpperCamelCase__ ):
urllib.request.urlretrieve(UpperCamelCase__ , UpperCamelCase__ ) # noqa: S310
with gfile.GFile(UpperCamelCase__ ) as f:
lowerCamelCase : str = f.size()
print("""Successfully downloaded""" , UpperCamelCase__ , UpperCamelCase__ , """bytes.""" )
return filepath
@deprecated(
UpperCamelCase__ , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : str=dtypes.floataa , UpperCamelCase__ : str=True , UpperCamelCase__ : str=5000 , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : str=DEFAULT_SOURCE_URL , ) -> Tuple:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCamelCase__ , one_hot=UpperCamelCase__ , dtype=UpperCamelCase__ , seed=UpperCamelCase__ )
lowerCamelCase : Tuple = fake()
lowerCamelCase : List[str] = fake()
lowerCamelCase : str = fake()
return _Datasets(train=UpperCamelCase__ , validation=UpperCamelCase__ , test=UpperCamelCase__ )
if not source_url: # empty string check
lowerCamelCase : List[str] = DEFAULT_SOURCE_URL
lowerCamelCase : Any = """train-images-idx3-ubyte.gz"""
lowerCamelCase : Optional[int] = """train-labels-idx1-ubyte.gz"""
lowerCamelCase : Optional[Any] = """t10k-images-idx3-ubyte.gz"""
lowerCamelCase : Any = """t10k-labels-idx1-ubyte.gz"""
lowerCamelCase : Any = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + train_images_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : List[Any] = _extract_images(UpperCamelCase__ )
lowerCamelCase : Tuple = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + train_labels_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : int = _extract_labels(UpperCamelCase__ , one_hot=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + test_images_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : Optional[int] = _extract_images(UpperCamelCase__ )
lowerCamelCase : List[Any] = _maybe_download(
UpperCamelCase__ , UpperCamelCase__ , source_url + test_labels_file )
with gfile.Open(UpperCamelCase__ , """rb""" ) as f:
lowerCamelCase : Dict = _extract_labels(UpperCamelCase__ , one_hot=UpperCamelCase__ )
if not 0 <= validation_size <= len(UpperCamelCase__ ):
lowerCamelCase : Tuple = (
"""Validation size should be between 0 and """
F'{len(UpperCamelCase__ )}. Received: {validation_size}.'
)
raise ValueError(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = train_images[:validation_size]
lowerCamelCase : Tuple = train_labels[:validation_size]
lowerCamelCase : Optional[int] = train_images[validation_size:]
lowerCamelCase : Optional[int] = train_labels[validation_size:]
lowerCamelCase : Union[str, Any] = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
lowerCamelCase : Optional[int] = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : List[Any] = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = _DataSet(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
return _Datasets(train=UpperCamelCase__ , validation=UpperCamelCase__ , test=UpperCamelCase__ )
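# Minimal usage sketch of the loader above (named read_data_sets in the
# original TensorFlow tutorial code; the local path is hypothetical):
#   data = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#   images, labels = data.train.next_batch(64)  # images: (64, 784) float32 in [0.0, 1.0]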
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Dummy model whose forward() lists the ONNX inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy model with an extra argument interleaved between the ONNX inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)
@require_torch
@slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
@require_torch
@slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
@require_tf
@slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
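
# Illustrative mapping performed above (hypothetical fairseq key): with
# expert_idx=7, "layers.3.moe_layer.experts.0.fc1.weight" becomes
# "layers.3.ffn.experts.expert_7.fc1.weight", while dense ".fc1."/".fc2."
# keys (those without "experts" in them) gain a ".ffn." prefix instead.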
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    """Join the given list of strings with ``separator``, without a trailing separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
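
# Example behaviour (a sketch, not part of the original module):
#   join(" ", ["You", "are", "amazing!"])  ->  "You are amazing!"
#   join(", ", ["a", "b", "c"])            ->  "a, b, c"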
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145 helper)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers for every length up to ``max_power`` digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
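
# Sanity check (from the Project Euler 145 statement): there are 120 reversible
# numbers below one thousand, so solution(3) == 120.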
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int, ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
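
# Worked example (values illustrative): for a 480x640 input, target (384, 384),
# keep_aspect_ratio=True and multiple=32, the height scale 0.8 is closer to 1,
# so both sides are scaled by 0.8 and snapped to multiples of 32, giving (384, 512).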
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
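
    # Note on the post-processing above: per-image logits are bilinearly
    # upsampled to each requested target size before the per-pixel argmax, so
    # the returned segmentation maps line up with the original image resolutions.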
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
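
    # The encoder/decoder input dimension (feature_size) computed in __init__
    # is input_size * len(lags_sequence) + _number_of_features: lagged copies
    # of each input channel, plus embedded categorical features, real-valued
    # features, and the two scaling statistics log1p(abs(loc)) and log(scale).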
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` inside [a, b], assuming a sign change on the interval."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
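
# Worked example (a sketch): f(2) = -1 and f(3) = 16, so the sign change puts a
# root in [2, 3]; bisection narrows in on the real root x ≈ 2.0945515.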
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element to the end."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
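
# Example (a sketch, not part of the original module):
#   bubble_sort([0, 5, 2, 3, 2])  ->  [0, 2, 2, 3, 5]
# Note the recursion depth is bounded by len(list_data), so very long inputs
# can hit Python's default recursion limit.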
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"

_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"

_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowercase__ : int = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
def lowerCAmelCase__ ( self : Tuple ) ->str:
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(UpperCAmelCase__ )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=0 ) ->List[str]:
if str(UpperCAmelCase__ ).startswith('''mps''' ):
UpperCAmelCase_ = torch.manual_seed(UpperCAmelCase__ )
else:
UpperCAmelCase_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
UpperCAmelCase_ = UpperCAmelCase_ = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def lowerCAmelCase__ ( self : Dict ) ->Any:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
UpperCAmelCase_ = self.get_dummy_inputs(UpperCAmelCase__ )
UpperCAmelCase_ = pipe(**UpperCAmelCase__ ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
UpperCAmelCase_ = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1e-3 )
def lowerCAmelCase__ ( self : int ) ->str:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self : int ) ->List[str]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def lowerCAmelCase__ ( self : Tuple ) ->Any:
super().test_save_load_local(expected_max_difference=5e-4 )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
UpperCAmelCase_ = torch.manual_seed(51 )
UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
UpperCAmelCase_ = '''a painting of an elephant with glasses'''
UpperCAmelCase_ = [5, 7]
UpperCAmelCase_ = pipe(
prompt=UpperCAmelCase__ , token_indices=UpperCAmelCase__ , guidance_scale=7.5 , generator=UpperCAmelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-1
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after the shortest-edge resize and size_divisor snapping."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares
    of the first ``n`` natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
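
# Worked example: for n = 10 the sum of squares is 385 and the square of the
# sum is 3025, so solution(10) == 2640.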
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether tf_top_k_top_p_filtering behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ], dtype=tf.float32, )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32, )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], dtype=tf.float32, )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), dtype=tf.int32, )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
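
    # Context for the assertions above (a summary, not new behaviour):
    # tf_top_k_top_p_filtering keeps the top_k highest logits plus the smallest
    # set whose cumulative probability reaches top_p, masking the rest to -inf,
    # while min_tokens_to_keep=4 guarantees at least four candidates per row.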
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # framework-dependent parameters are gated behind the TF availability check
    if is_tf_available():
        framework_dependent_parameters = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ), jit_compile=True, )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ), jit_compile=True, )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, return_dict_in_generate=True, )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A model that accepts `foo` as an extra kwarg must produce the same result,
        # since `foo` is filtered out of the inputs passed to the encoder.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
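if __name__ == "__main__":
    # Minimal, self-contained sketch of the saved_model export pattern the tests
    # above exercise; the model name, padded input length (5) and output path are
    # illustrative assumptions, not fixtures from the test suite.
    import tensorflow as tf
    from transformers import TFAutoModelForCausalLM

    class ExportableGenerator(tf.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        @tf.function(
            input_signature=(
                tf.TensorSpec((None, 5), tf.int32, name="input_ids"),
                tf.TensorSpec((None, 5), tf.int32, name="attention_mask"),
            )
        )
        def serving(self, input_ids, attention_mask):
            # generate() is traced with a fixed sequence length and variable batch size
            outputs = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=8,
                return_dict_in_generate=True,
            )
            return {"sequences": outputs["sequences"]}

    exportable = ExportableGenerator(TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2"))
    tf.saved_model.save(exportable, "/tmp/exported_generate", signatures={"serving_default": exportable.serving})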
| 43
| 1
|
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (BPE decoder-side tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens):
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
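if __name__ == "__main__":
    # Toy illustration of the greedy lowest-rank pair merging performed by `bpe`
    # above, with assumed ranks instead of a real `merges.txt`:
    toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}

    def toy_bpe(word):
        symbols = list(word)
        while len(symbols) > 1:
            pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
            # pick the known merge with the lowest rank; stop when none applies
            best = min(pairs, key=lambda pair: toy_ranks.get(pair, float("inf")))
            if best not in toy_ranks:
                break
            merged, i = [], 0
            while i < len(symbols):
                if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                    merged.append(symbols[i] + symbols[i + 1])
                    i += 2
                else:
                    merged.append(symbols[i])
                    i += 1
            symbols = merged
        return symbols

    print(toy_bpe("low"))  # ['low'] after merging ('l', 'o') then ('lo', 'w')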
| 43
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] by repeated halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the half-interval is below 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
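    # Sanity check (illustrative): bisection halves the interval each step, so
    # shrinking [1, 1000] below 1e-7 takes about ceil(log2(999 / 1e-7)) ~ 34 steps,
    # and the computed root of x^3 - 2x - 5 is ~2.0945515.
    import math

    print(math.ceil(math.log2((1000 - 1) / 10**-7)))  # 34
    print(round(bisection(f, 1, 1000), 6))  # 2.094551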
| 43
| 1
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
'''simple docstring'''
    def __init__(
        self,
        value_function: UNetaDModel,
        unet: UNetaDModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
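if __name__ == "__main__":
    # Toy sketch (stand-in value function and shapes, not the pipeline above) of
    # the guided update at the heart of run_diffusion: nudge the sample along the
    # value gradient, scaled by the posterior std, before each denoising step.
    import torch

    x = torch.randn(4, 8, requires_grad=True)  # batch of flattened trajectories
    value = (x**2).sum()                       # stand-in for value_function(...).sample
    grad = torch.autograd.grad(value, x)[0]
    posterior_std = torch.exp(0.5 * torch.tensor(0.1))  # assumed variance
    x_guided = x.detach() + 0.1 * posterior_std * grad  # scale = 0.1
    print(x_guided.shape)  # torch.Size([4, 8])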
| 43
|
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    """Split a string on punctuation, then split each piece into words."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Capitalize every word and concatenate (PascalCase)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join words with `separator`, upper- or lower-casing them."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
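    # Quick demonstration of the converters above (illustrative input):
    print(to_pascal_case("one two three"))        # OneTwoThree
    print(to_camel_case("one two three"))         # oneTwoThree
    print(to_snake_case("one two three", False))  # one_two_three
    print(to_kebab_case("one two three", True))   # ONE-TWO-THREE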
| 43
| 1
|
'''simple docstring'''
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string back to plaintext."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
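    # Round-trip example for the cipher above (illustrative plaintext):
    ciphertext = encode("hello")  # 'AABBBAABAAABABAABABAABBAB'
    assert decode(ciphertext) == "hello"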
| 43
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
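if __name__ == "__main__":
    # Illustrative numpy restatement (not the library function) of what
    # test_shift_tokens_right checks: tokens shift one slot right, position 0
    # becomes decoder_start_token_id, and the shape is preserved.
    def shift_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
        shifted = np.zeros_like(input_ids)
        shifted[:, 1:] = input_ids[:, :-1]
        shifted[:, 0] = decoder_start_token_id
        return np.where(shifted == -100, pad_token_id, shifted)

    ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
    print(shift_right_sketch(ids, 1, 2))  # [[ 2 71 82 18 33  2  1]]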
| 43
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
@slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)
@slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)
@slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
@slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)
@slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)
@slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)
@slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
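if __name__ == "__main__":
    # Minimal sketch of the PT<->TF round-trip the tests above exercise; the
    # checkpoint is illustrative, and both torch and tensorflow must be installed.
    from transformers import AutoModelForMaskedLM, TFAutoModelForMaskedLM

    tf_model = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_pt=True)
    pt_model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_tf=True)
    print(type(tf_model).__name__, type(pt_model).__name__)  # TFBertForMaskedLM BertForMaskedLM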
| 43
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
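if __name__ == "__main__":
    # Quick illustration of the metadata the tests above assert on
    # (requires network access to the Hub):
    from datasets import get_dataset_split_names

    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']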
| 43
| 1
|
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
    help=(
        "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
        " Base. Use 768 for Stable Diffusion v2."
    ),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ : Optional[Any] = parser.parse_args()
lowercase__ : Union[str, Any] = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
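    # Example invocation (script name, paths and filenames are illustrative):
    #   python convert_original_controlnet_to_diffusers.py \
    #       --checkpoint_path ./control_sd15_canny.pth \
    #       --original_config_file ./cldm_v15.yaml \
    #       --dump_path ./controlnet-canny \
    #       --to_safetensors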
| 43
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = 0
while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_UpperCamelCase ):
UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0]
UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase )
if single_line_import_search is not None:
UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCAmelCase_ = lines[line_index]
if _re_import_struct_add_one.search(_UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None:
UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif _re_between_brackets.search(_UpperCamelCase ) is not None:
UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif _re_quote_object.search(_UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ = []
while (
line_index < len(_UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
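# Hedged illustration (not part of the original script): for a typical init,
# `parse_init` returns two backend-keyed dicts of object names, e.g.
#     ({"none": ["FooConfig"], "torch": ["FooModel"]},
#      {"none": ["FooConfig"], "torch": ["FooModel"]})
# where the first half comes from `_import_structure` and the second from the
# TYPE_CHECKING block; `FooConfig`/`FooModel` are made-up placeholder names.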
def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
def find_duplicates(_UpperCamelCase : Tuple ):
return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ = []
for key in import_dict_objects.keys():
UpperCAmelCase_ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCAmelCase_ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
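# Hedged illustration: a mismatch between the two halves produces messages like
#     "Differences for torch backend:"
#     "  FooModel in _import_structure but not in TYPE_HINT."
# (`FooModel` is a made-up placeholder name.)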
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = []
for root, _, files in os.walk(_UpperCamelCase ):
if "__init__.py" in files:
UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' )
UpperCAmelCase_ = parse_init(_UpperCamelCase )
if objects is not None:
UpperCAmelCase_ = analyze_results(*_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(_UpperCamelCase ) )
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = []
for path, directories, files in os.walk(_UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) )
UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' )
submodules.append(_UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) )
UpperCAmelCase_ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_UpperCamelCase )
return submodules
lowercase__ : Union[str, Any] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def __lowerCamelCase ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase )
UpperCAmelCase_ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to find all additions and
    # (potentially re-)add them.
with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
UpperCAmelCase_ = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) )
UpperCAmelCase_ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_UpperCamelCase ) > 0:
UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowercase__ : Dict = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , *UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : List[str] ) ->Tuple:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = eval_examples
UpperCAmelCase_ = post_process_function
UpperCAmelCase_ = quant_trainer_args
UpperCAmelCase_ = 128 # default number of calibration samples
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : Tuple=None ) ->Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
UpperCAmelCase_ = calib_dataset if calib_dataset is not None else self.calib_dataset
UpperCAmelCase_ = self._remove_unused_columns(UpperCAmelCase__ , description='''Calibration''' )
return DataLoader(
UpperCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Optional[int]=None ) ->Any:
UpperCAmelCase_ = self.train_dataset if calib_dataset is None else calib_dataset
UpperCAmelCase_ = self.get_calib_dataloader(UpperCAmelCase__ )
UpperCAmelCase_ = self.model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args , calib=UpperCAmelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase__ )
logger.info('''***** Running calibration *****''' )
logger.info(f""" Num examples = {self.calib_num}""" )
logger.info(f""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(UpperCAmelCase__ ):
# Prediction step
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prediction_step(UpperCAmelCase__ , UpperCAmelCase__ , prediction_loss_only=UpperCAmelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase__ , self.quant_trainer_args )
UpperCAmelCase_ = model
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : str = "eval" ) ->List[str]:
UpperCAmelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase_ = self.get_eval_dataloader(UpperCAmelCase__ )
UpperCAmelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ = self.compute_metrics
UpperCAmelCase_ = None
UpperCAmelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ = eval_loop(
UpperCAmelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
UpperCAmelCase_ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
UpperCAmelCase_ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
UpperCAmelCase_ = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCAmelCase_ = metrics.pop(UpperCAmelCase__ )
self.log(UpperCAmelCase__ )
else:
UpperCAmelCase_ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = "test" ) ->Dict:
UpperCAmelCase_ = self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ = self.compute_metrics
UpperCAmelCase_ = None
UpperCAmelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ = eval_loop(
UpperCAmelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
UpperCAmelCase_ = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase_ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , '''predict''' )
UpperCAmelCase_ = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
UpperCAmelCase_ = metrics.pop(UpperCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Dict="./" ) ->List[str]:
UpperCAmelCase_ = self.eval_dataset
UpperCAmelCase_ = self.get_eval_dataloader(UpperCAmelCase__ )
UpperCAmelCase_ = next(iter(UpperCAmelCase__ ) )
# saving device - to make it consistent
UpperCAmelCase_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
UpperCAmelCase_ = tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
UpperCAmelCase_ = True
UpperCAmelCase_ = self.model.to(UpperCAmelCase__ )
model.eval()
model.float()
UpperCAmelCase_ = model.module if hasattr(UpperCAmelCase__ , '''module''' ) else model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args )
UpperCAmelCase_ = os.path.join(UpperCAmelCase__ , '''model.onnx''' )
logger.info(f"""exporting model to {output_model_file}""" )
UpperCAmelCase_ = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , export_params=UpperCAmelCase__ , opset_version=13 , do_constant_folding=UpperCAmelCase__ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=UpperCAmelCase__ , )
logger.info('''onnx export finished''' )
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the positions a knight can move to from `position` on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether every square of the board has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursive backtracking helper: try to extend the tour from `pos`."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
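    # Illustrative extra (not in the original file): print one open tour on a
    # 5x5 board; each entry is the move number at which the knight visited it.
    for row in open_knight_tour(5):
        print(row)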
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowercase__ : int = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : tuple , _UpperCamelCase : Path , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=False , ):
'''simple docstring'''
output_path.parent.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_UpperCamelCase , _UpperCamelCase , f=output_path.as_posix() , input_names=_UpperCamelCase , output_names=_UpperCamelCase , dynamic_axes=_UpperCamelCase , do_constant_folding=_UpperCamelCase , use_external_data_format=_UpperCamelCase , enable_onnx_checker=_UpperCamelCase , opset_version=_UpperCamelCase , )
else:
export(
_UpperCamelCase , _UpperCamelCase , f=output_path.as_posix() , input_names=_UpperCamelCase , output_names=_UpperCamelCase , dynamic_axes=_UpperCamelCase , do_constant_folding=_UpperCamelCase , opset_version=_UpperCamelCase , )
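# Hedged usage sketch (not in the original script; all argument values are
# illustrative assumptions mirroring the VAE decoder call further down):
#     onnx_export(
#         model,
#         model_args=(dummy_input, False),
#         output_path=Path("out/model.onnx"),
#         ordered_input_names=["latent_sample", "return_dict"],
#         output_names=["sample"],
#         dynamic_axes={"latent_sample": {0: "batch"}},
#         opset=14,
#     )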
@torch.no_grad()
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : bool = False ):
'''simple docstring'''
    UpperCAmelCase_ = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
UpperCAmelCase_ = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = Path(_UpperCamelCase )
# VAE DECODER
UpperCAmelCase_ = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
UpperCAmelCase_ = vae_decoder.config.latent_channels
# forward only through the decoder part
UpperCAmelCase_ = vae_decoder.decode
onnx_export(
_UpperCamelCase , model_args=(
torch.randn(1 , _UpperCamelCase , 25 , 25 ).to(device=_UpperCamelCase , dtype=_UpperCamelCase ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=_UpperCamelCase , )
del vae_decoder
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
lowercase__ : Any = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the input string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse a Burrows-Wheeler transform, given the index of the original rotation."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
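# Hedged round-trip sketch (not part of the original module):
#     result = bwt_transform("banana")
#     # result == {"bwt_string": "nnbaaa", "idx_original_string": 3}
#     assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"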
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger()
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = field(default_factory=lowerCamelCase )
lowerCAmelCase__ = field(default_factory=lowerCamelCase )
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Tensor ) ->str:
        UpperCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase__ , nn.Conv2d ) or isinstance(UpperCAmelCase__ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCAmelCase__ )
def __call__( self : int , UpperCAmelCase__ : Tensor ) ->Dict:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def lowerCAmelCase__ ( self : str ) ->Any:
# check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 1
lowerCAmelCase__ = field(default_factory=lowerCamelCase )
lowerCAmelCase__ = field(default_factory=lowerCamelCase )
lowerCAmelCase__ = True
def __call__( self : str , UpperCAmelCase__ : Tensor ) ->List[Any]:
UpperCAmelCase_ = Tracker(self.dest )(UpperCAmelCase__ ).parametrized
UpperCAmelCase_ = Tracker(self.src )(UpperCAmelCase__ ).parametrized
UpperCAmelCase_ = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.src_skip , UpperCAmelCase__ ) )
UpperCAmelCase_ = list(filter(lambda UpperCAmelCase__ : type(UpperCAmelCase__ ) not in self.dest_skip , UpperCAmelCase__ ) )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCAmelCase__ )} operations while"""
f""" destination module has {len(UpperCAmelCase__ )}.""" )
for dest_m, src_m in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""" )
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase__ : nn.Module ) ->List[str]:
super().__init__()
UpperCAmelCase_ = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f"""Unexpected layer name {k}"""
UpperCAmelCase_ = len(UpperCAmelCase__ ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
UpperCAmelCase_ = nn.ModuleDict(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : Tensor ) ->Optional[Any]:
return get_trunk_forward_outputs(
UpperCAmelCase__ , out_feat_keys=UpperCAmelCase__ , feature_blocks=self._feature_blocks , )
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : str ) ->str:
UpperCAmelCase_ = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : int , UpperCAmelCase__ : str ) ->Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
UpperCAmelCase_ = self.convert_name_to_timm(UpperCAmelCase__ )
UpperCAmelCase_ = partial(lambda: (timm.create_model(UpperCAmelCase__ , pretrained=UpperCAmelCase__ ).eval(), None) )
else:
UpperCAmelCase_ = super().__getitem__(UpperCAmelCase__ )
return val
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def __getitem__( self : Optional[int] , UpperCAmelCase__ : str ) ->Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ = RegNetModel
else:
UpperCAmelCase_ = RegNetForImageClassification
return val
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
print(F"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Callable[[], nn.Module] , _UpperCamelCase : Callable[[], nn.Module] , _UpperCamelCase : RegNetConfig , _UpperCamelCase : Path , _UpperCamelCase : bool = True , ):
'''simple docstring'''
print(F"""Converting {name}...""" )
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ = from_model_func()
UpperCAmelCase_ = our_model_func(_UpperCamelCase ).eval()
UpperCAmelCase_ = ModuleTransfer(src=_UpperCamelCase , dest=_UpperCamelCase , raise_if_mismatch=_UpperCamelCase )
UpperCAmelCase_ = torch.randn((1, 3, 224, 224) )
module_transfer(_UpperCamelCase )
if from_state_dict is not None:
UpperCAmelCase_ = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
UpperCAmelCase_ = manually_copy_vissl_head(_UpperCamelCase , our_model.state_dict() , _UpperCamelCase )
our_model.load_state_dict(_UpperCamelCase )
UpperCAmelCase_ = our_model(_UpperCamelCase , output_hidden_states=_UpperCamelCase )
UpperCAmelCase_ = (
our_outputs.logits if isinstance(_UpperCamelCase , _UpperCamelCase ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ = from_model(_UpperCamelCase )
UpperCAmelCase_ = from_output[-1] if type(_UpperCamelCase ) is list else from_output
    # now, since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = our_outputs.hidden_states[-1]
assert torch.allclose(_UpperCamelCase , _UpperCamelCase ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_UpperCamelCase , )
UpperCAmelCase_ = 224 if '''seer''' not in name else 384
# we can use the convnext one
UpperCAmelCase_ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_UpperCamelCase )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_UpperCamelCase , )
print(F"""Pushed {name}""" )
def __lowerCamelCase ( _UpperCamelCase : Path , _UpperCamelCase : str = None , _UpperCamelCase : bool = True ):
'''simple docstring'''
UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ = 1000
UpperCAmelCase_ = (1, num_labels)
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
    UpperCAmelCase_ = {int(k): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = partial(_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase )
UpperCAmelCase_ = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
UpperCAmelCase_ = NameToOurModelFuncMap()
UpperCAmelCase_ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_UpperCamelCase : str , _UpperCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(_UpperCamelCase , model_dir=str(_UpperCamelCase ) , map_location='''cpu''' )
UpperCAmelCase_ = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ = files['''classy_state_dict''']['''base_model''']['''model''']
UpperCAmelCase_ = model_state_dict['''trunk''']
model.load_state_dict(_UpperCamelCase )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ = partial(
        _UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
UpperCAmelCase_ = partial(
        _UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
UpperCAmelCase_ = partial(
        _UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
UpperCAmelCase_ = partial(
_UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
UpperCAmelCase_ = partial(
        _UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
UpperCAmelCase_ = partial(
        _UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
UpperCAmelCase_ = partial(
        _UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
UpperCAmelCase_ = partial(
_UpperCamelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_UpperCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_UpperCamelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
return config, expected_shape
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architectures,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ : Union[str, Any] = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"]
lowercase__ : List[Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
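# Hedged illustration (not part of the original file): with `_LazyModule`, the
# heavy framework imports above only happen on first attribute access, e.g.:
#     from transformers.models.mobilevit import MobileViTConfig  # cheap
#     config = MobileViTConfig()  # the configuration submodule loads here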
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase__ : Optional[int] = logging.getLogger(__name__)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=None ) ->List[str]:
UpperCAmelCase_ = self.layer[current_layer](UpperCAmelCase__ , UpperCAmelCase__ , head_mask[current_layer] )
UpperCAmelCase_ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , lowerCamelCase , )
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any ) ->str:
super().__init__(UpperCAmelCase__ )
UpperCAmelCase_ = BertEncoderWithPabee(UpperCAmelCase__ )
self.init_weights()
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Dict ) ->Optional[Any]:
UpperCAmelCase_ = threshold
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : int ) ->Dict:
UpperCAmelCase_ = patience
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
UpperCAmelCase_ = self.inference_layers_num / self.inference_instances_num
UpperCAmelCase_ = (
f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(UpperCAmelCase__ )
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : str=False , ) ->Any:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
UpperCAmelCase_ = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase_ = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
UpperCAmelCase_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase_ = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
UpperCAmelCase_ = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase_ = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = encoder_hidden_states.size()
UpperCAmelCase_ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCAmelCase_ = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
UpperCAmelCase_ = self.invert_attention_mask(UpperCAmelCase__ )
else:
UpperCAmelCase_ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase_ = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
UpperCAmelCase_ = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
UpperCAmelCase_ = embedding_output
if self.training:
UpperCAmelCase_ = []
for i in range(self.config.num_hidden_layers ):
UpperCAmelCase_ = self.encoder.adaptive_forward(
UpperCAmelCase__ , current_layer=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
UpperCAmelCase_ = self.pooler(UpperCAmelCase__ )
UpperCAmelCase_ = output_layers[i](output_dropout(UpperCAmelCase__ ) )
res.append(UpperCAmelCase__ )
elif self.patience == 0: # Use all layers for inference
UpperCAmelCase_ = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
UpperCAmelCase_ = self.pooler(encoder_outputs[0] )
UpperCAmelCase_ = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase__ )]
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCAmelCase_ = self.encoder.adaptive_forward(
UpperCAmelCase__ , current_layer=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
UpperCAmelCase_ = self.pooler(UpperCAmelCase__ )
UpperCAmelCase_ = output_layers[i](UpperCAmelCase__ )
if regression:
UpperCAmelCase_ = logits.detach()
if patient_result is not None:
UpperCAmelCase_ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCAmelCase_ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase__ ) ):
patient_counter += 1
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = logits
if patient_counter == self.patience:
break
UpperCAmelCase_ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , lowerCamelCase , )
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase__ : Tuple ) ->Any:
super().__init__(UpperCAmelCase__ )
UpperCAmelCase_ = config.num_labels
UpperCAmelCase_ = BertModelWithPabee(UpperCAmelCase__ )
UpperCAmelCase_ = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase_ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Tuple=None , ) ->List[Any]:
UpperCAmelCase_ = self.bert(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCAmelCase_ = (logits[-1],)
if labels is not None:
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
for ix, logits_item in enumerate(UpperCAmelCase__ ):
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ = MSELoss()
UpperCAmelCase_ = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase_ = CrossEntropyLoss()
UpperCAmelCase_ = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCAmelCase_ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCAmelCase_ = (total_loss / total_weights,) + outputs
return outputs
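# Hedged usage sketch (not in the original file; the setter names follow the
# upstream PABEE example scripts and are assumptions here):
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#     model.bert.set_regression_threshold(0.1)  # only relevant for regression tasks
#     model.bert.set_patience(3)                # exit once 3 consecutive layers agree
#     logits = model(input_ids=input_ids)[0]
#     model.bert.log_stats()                    # prints the average number of layers used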
'''simple docstring'''
lowercase__ : Union[str, Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase__ : str = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase__ : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Pure implementation of selection sort: repeatedly select the minimum of
    the unsorted suffix and swap it into place."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
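# Quick hedged example (not in the original file):
#     selection_sort([3, 1, 4, 1, 5])  # -> [1, 1, 3, 4, 5]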
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ : Optional[Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowercase__ : str = float("nan")
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , UpperCAmelCase__ : Optional[int] ) ->Union[str, Any]:
UpperCAmelCase_ = sys.stdout
UpperCAmelCase_ = open(UpperCAmelCase__ , '''a''' )
def __getattr__( self : List[Any] , UpperCAmelCase__ : Tuple ) ->Tuple:
return getattr(self.stdout , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] ) ->Union[str, Any]:
self.stdout.write(UpperCAmelCase__ )
# strip tqdm codes
self.file.write(re.sub(r'''^.*\r''' , '''''' , UpperCAmelCase__ , 0 , re.M ) )
def __lowerCamelCase ( _UpperCamelCase : List[str]=80 , _UpperCamelCase : Dict=False ):
'''simple docstring'''
UpperCAmelCase_ = []
# deal with critical env vars
UpperCAmelCase_ = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
UpperCAmelCase_ = os.environ.get(_UpperCamelCase , _UpperCamelCase )
if val is not None:
cmd.append(F"""{key}={val}""" )
# python executable (not always needed if the script is executable)
UpperCAmelCase_ = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(_UpperCamelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
UpperCAmelCase_ = []
UpperCAmelCase_ = ''''''
while len(_UpperCamelCase ) > 0:
current_line += F"""{cmd.pop(0 )} """
if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCamelCase )
UpperCAmelCase_ = ''''''
return "\\\n".join(_UpperCamelCase )
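# Hedged illustration (not in the original script): for a launch such as
#   CUDA_VISIBLE_DEVICES=0 python trainer-benchmark.py --base-cmd '...' --variations '...'
# this returns the full reproducible command, wrapped with shell line-continuations:
#   CUDA_VISIBLE_DEVICES=0 python trainer-benchmark.py \
#   --base-cmd '...' --variations '...'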
def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
    UpperCAmelCase_ = re.sub(r'''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += F""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
    UpperCAmelCase_ = re.sub(r'''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the next block to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
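# Example invocation (hypothetical script path and base command, adapt to your setup):
#
#   python trainer-benchmark.py \
#       --base-cmd "examples/pytorch/translation/run_translation.py --model_name_or_path t5-small ..." \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2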
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Return the shortest distance and path from source to destination on a binary grid."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
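# A minimal usage sketch (added for illustration; 1-cells are walkable, 0-cells are blocked):
# >>> import numpy as np
# >>> grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
# >>> dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
# (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])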
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the input string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Apply the Burrows-Wheeler transform to a non-empty string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse the Burrows-Wheeler transform given the transformed string and the original index."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase__ : Optional[int] = "Provide a string that I will generate its BWT transform: "
lowercase__ : List[Any] = input(entry_msg).strip()
lowercase__ : Any = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
lowercase__ : Dict = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
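# Illustrative round trip (added sketch):
# >>> result = bwt_transform("^BANANA")
# >>> result["bwt_string"], result["idx_original_string"]
# ('BNN^AAA', 6)
# >>> reverse_bwt("BNN^AAA", 6)
# '^BANANA'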
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
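# A small illustrative config (hypothetical values, not a released checkpoint):
# >>> config = FalconConfig(vocab_size=1024, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
# >>> config.head_dim
# 16
# >>> config.rotary  # alibi defaults to False, so rotary position embeddings apply
# True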
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
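# Worked example (illustrative, not in the original file):
#   max_subsequence_sum([1, 2, 3, 4, -2]) -> 10   (pick 1 + 2 + 3 + 4, skip -2)
#   max_subsequence_sum([-2, -3, -1, -4, -6]) -> -1   (all negative: best single element)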
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
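# Example invocation (hypothetical paths; note the tf_checkpoint_path[:-10] slice above
# assumes the checkpoint file is named "model.ckpt"):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/dump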
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
'''simple docstring'''
def join(separator: str, separated: list[str]) -> str:
    """Join a list of strings using a separator and return the result."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
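# Illustrative behavior (added sketch):
# >>> join("_", ["a", "b", "c"])
# 'a_b_c'
# >>> join(" ", ["You", "are", "amazing!"])
# 'You are amazing!'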
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,  # the expected-encoding dict defined in the fmt: off block above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_decode(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
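# Worked example (illustrative, not from the original file): for a 480x640 input,
# output_size=(384, 384), keep_aspect_ratio=True, multiple=32:
#   scale_height = 384/480 = 0.8, scale_width = 384/640 = 0.6
#   the dimension that changes least wins, so both axes are scaled by 0.8
#   rounding to multiples of 32 gives (384, 512)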
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
UpperCAmelCase_ = torch_device
UpperCAmelCase_ = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
UpperCAmelCase_ = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
UpperCAmelCase_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=UpperCAmelCase__ )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCAmelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
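# Usage sketch (illustrative, mirroring the slow test above; the model id comes
# from that test, everything else is an assumption):
#     pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''')
#     output = pipe(generator=torch.Generator().manual_seed(42))
#     image, audio = output.images[0], output.audios[0]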
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[str] = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''time_series_transformer'''
lowerCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]:
# time series specific configuration
UpperCAmelCase_ = prediction_length
UpperCAmelCase_ = context_length or prediction_length
UpperCAmelCase_ = distribution_output
UpperCAmelCase_ = loss
UpperCAmelCase_ = input_size
UpperCAmelCase_ = num_time_features
UpperCAmelCase_ = lags_sequence
UpperCAmelCase_ = scaling
UpperCAmelCase_ = num_dynamic_real_features
UpperCAmelCase_ = num_static_real_features
UpperCAmelCase_ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase_ = cardinality
else:
UpperCAmelCase_ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase_ = embedding_dimension
else:
UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase_ = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = use_cache
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCAmelCase__ ( self : List[str] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
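# Worked example (illustrative; upstream this class is TimeSeriesTransformerConfig):
# with the defaults above -- input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7]
# and no time/static/dynamic features -- _number_of_features evaluates to
# 0 + 0 + 0 + 0 + 1 * 2 = 2, so feature_size = input_size * len(lags_sequence)
# + _number_of_features = 1 * 7 + 2 = 9.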
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
lowerCamelCase , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : GenericTensor ) ->np.ndarray:
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCAmelCase__ )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : GenericTensor ) ->np.ndarray:
UpperCAmelCase_ = self.get_masked_index(UpperCAmelCase__ )
UpperCAmelCase_ = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : GenericTensor ) ->Optional[Any]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=None , **UpperCAmelCase__ : int ) ->Dict[str, GenericTensor]:
if return_tensors is None:
UpperCAmelCase_ = self.framework
UpperCAmelCase_ = self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ )
self.ensure_exactly_one_mask_token(UpperCAmelCase__ )
return model_inputs
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Optional[Any] ) ->Any:
UpperCAmelCase_ = self.model(**UpperCAmelCase__ )
UpperCAmelCase_ = model_inputs['''input_ids''']
return model_outputs
def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=5 , UpperCAmelCase__ : Union[str, Any]=None ) ->List[str]:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs['''input_ids'''][0]
UpperCAmelCase_ = model_outputs['''logits''']
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(UpperCAmelCase__ , axis=-1 )
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(UpperCAmelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase_ = tf.expand_dims(UpperCAmelCase__ , 0 )
UpperCAmelCase_ = tf.math.top_k(UpperCAmelCase__ , k=UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCAmelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(UpperCAmelCase__ )
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase_ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
UpperCAmelCase_ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(UpperCAmelCase__ )
result.append(UpperCAmelCase__ )
if single_mask:
return result[0]
return result
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]=None ) ->Union[str, Any]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(UpperCAmelCase__ , UpperCAmelCase__ )
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , max_length=1 , truncation=UpperCAmelCase__ , )['''input_ids''']
if len(UpperCAmelCase__ ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
UpperCAmelCase_ = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase_ = list(set(UpperCAmelCase__ ) )
if len(UpperCAmelCase__ ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
UpperCAmelCase_ = np.array(UpperCAmelCase__ )
return target_ids
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=None ) ->Dict:
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : List[str] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Tuple ) ->str:
UpperCAmelCase_ = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and len(UpperCAmelCase__ ) == 1:
return outputs[0]
return outputs
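# Usage sketch (illustrative; upstream this class is FillMaskPipeline and is
# normally constructed through the `pipeline` factory):
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     unmasker("Paris is the <mask> of France.", top_k=2)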
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[Any] = "T5Config"
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
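# Usage sketch (illustrative; upstream these thin wrappers are TFMT5Model,
# TFMT5ForConditionalGeneration and TFMT5EncoderModel -- they only swap in
# MTaConfig and the '''mt5''' model type over the T5 implementations imported above):
#     model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")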
'''simple docstring'''
import os
def solution():
    '''simple docstring'''
    with open(os.path.dirname(__file__) + '''/grid.txt''') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
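    # Illustrative check (assumption): with the Project Euler problem 11 grid
    # in grid.txt, solution() == 70600674.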
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowercase__ : str = datasets.logging.get_logger(__name__)
lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any:
if self.config_name == "default":
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]:
if gpus is None:
UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )]
UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ )
return {"mean_score": mean_score, "scores": scores}
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase__ : List[str] = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase__ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''maskformer'''
lowerCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
lowerCAmelCase__ = ['''resnet''', '''swin''']
lowerCAmelCase__ = ['''detr''']
def __init__( self : List[str] , UpperCAmelCase__ : int = 256 , UpperCAmelCase__ : int = 256 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[Dict] = None , UpperCAmelCase__ : Optional[Dict] = None , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : float = 20.0 , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[Any] , ) ->str:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase_ = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase_ = backbone_config.pop('''model_type''' )
UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ = config_class.from_dict(UpperCAmelCase__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase_ = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase_ = (
decoder_config.pop('''model_type''' ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase_ = CONFIG_MAPPING[decoder_type]
UpperCAmelCase_ = config_class.from_dict(UpperCAmelCase__ )
UpperCAmelCase_ = backbone_config
UpperCAmelCase_ = decoder_config
# main feature dimension for the model
UpperCAmelCase_ = fpn_feature_size
UpperCAmelCase_ = mask_feature_size
# initializer
UpperCAmelCase_ = init_std
UpperCAmelCase_ = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase_ = cross_entropy_weight
UpperCAmelCase_ = dice_weight
UpperCAmelCase_ = mask_weight
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = no_object_weight
UpperCAmelCase_ = output_auxiliary_logits
UpperCAmelCase_ = self.decoder_config.encoder_attention_heads
UpperCAmelCase_ = self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : List[Any] ) ->Dict:
return cls(
backbone_config=UpperCAmelCase__ , decoder_config=UpperCAmelCase__ , **UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Dict ) ->Dict[str, any]:
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.backbone_config.to_dict()
UpperCAmelCase_ = self.decoder_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
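# Usage sketch (illustrative; upstream this class is MaskFormerConfig and the
# classmethod above is named from_backbone_and_decoder_configs):
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )
#     config.to_dict()["backbone_config"]["model_type"]  # "swin"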
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : List[Any]=3 , ) ->Dict:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=False ) ->Any:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(UpperCAmelCase__ , UpperCAmelCase__ )
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size )
if max(UpperCAmelCase__ , UpperCAmelCase__ ) > max_size:
UpperCAmelCase_ = max_size / max(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size_divisor''' ) )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
pass
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : int ) ->List[str]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
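# Example invocation (illustrative; the flag names follow PretokenizationArguments
# and the repo ids are assumptions):
#     python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo tokenized-codeparrot-train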
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
UpperCAmelCase_ = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
UpperCAmelCase_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
UpperCAmelCase_ = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
UpperCAmelCase_ = output[output != -float('''inf''' )]
UpperCAmelCase_ = tf.cast(
tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@require_tf
class lowerCamelCase ( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
if is_tf_available():
lowerCAmelCase__ = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
# TF-only test: tf.saved_model export
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = 2
UpperCAmelCase_ = 2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict:
super(UpperCAmelCase__ , self ).__init__()
UpperCAmelCase_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int:
UpperCAmelCase_ = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ = [[2, 0], [102, 103]]
UpperCAmelCase_ = [[1, 0], [1, 1]]
UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
UpperCAmelCase_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
# TF-only test: tf.saved_model export
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]:
super(UpperCAmelCase__ , self ).__init__()
UpperCAmelCase_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int:
UpperCAmelCase_ = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ = [[2], [102, 103]]
UpperCAmelCase_ = [[1], [1, 1]]
UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
for input_row in range(len(UpperCAmelCase__ ) ):
UpperCAmelCase_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_tensorflow_text
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ )
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ) ->Any:
super().__init__()
UpperCAmelCase_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() )
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]:
UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs(
UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
return self.tokenizer.detokenize(UpperCAmelCase__ )
UpperCAmelCase_ = CompleteSentenceTransformer()
UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
UpperCAmelCase_ = complete_model(UpperCAmelCase__ )
UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
keras_model.save(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
# Has PT equivalent: this test relies on random sampling
UpperCAmelCase_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
UpperCAmelCase_ = 14
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = '''Hello, my dog is cute and'''
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' )
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
UpperCAmelCase_ = [638, 198]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
# Has PT equivalent: ample use of framework-specific code
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.'''
UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids
UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]:
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
class lowerCamelCase ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any:
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared )
UpperCAmelCase_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()
with self.assertRaises(UpperCAmelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    '''simple docstring'''

    def __init__(self, value: int) -> None:
        self.value = value
        self.left = None
        self.right = None


class BinaryTreeNodeSum:
    '''simple docstring'''

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
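    # Illustrative usage (the class names restored above come from this file's
    # dangling references; `BinaryTreeNodeSum` is an assumption):
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 10 + 5 + (-3) == 12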
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of a or b is a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither is a root and they have the same sign,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until mid is precise to within 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
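    # Illustrative check (assumption: the real root of x**3 - 2x - 5 is
    # approximately 2.0945515):
    assert abs(bisection(f, 1, 1000) - 2.0945515) < 1e-3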
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    '''simple docstring'''
    # handle negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
    # handle angles outside the allowed range
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
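    # Illustrative check: at 60 degrees the transmitted intensity is
    # cos^2(60°) = 0.25 of the input, so malus_law(100.0, 60) ≈ 25.0.
    print(malus_law(100.0, 60))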
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''', str_)]


def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text, upper, '''_''')


def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text, upper, '''-''')
if __name__ == "__main__":
__import__("doctest").testmod()
'''simple docstring'''
from manim import *
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ) ->Dict:
UpperCAmelCase_ = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''CPU''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = [mem.copy() for i in range(1 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''GPU''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
gpu.align_to(UpperCAmelCase__ , UpperCAmelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = [mem.copy() for i in range(6 )]
UpperCAmelCase_ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
UpperCAmelCase_ = Text('''Model''' , font_size=24 )
UpperCAmelCase_ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) , )
UpperCAmelCase_ = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
UpperCAmelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ , run_time=2.5 ) , Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) )
self.add(UpperCAmelCase__ )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for i, rect in enumerate(UpperCAmelCase__ ):
UpperCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCAmelCase__ )
cpu_target.generate_target()
UpperCAmelCase_ = 0.46 / 4
UpperCAmelCase_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase__ , buff=0.0 )
cpu_targs.append(UpperCAmelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase__ ) )
second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(*UpperCAmelCase__ )
self.wait()
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class lowerCamelCase :
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
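
# Editor's note: a self-contained NumPy sketch of the `shift_tokens_right`
# semantics that `test_shift_tokens_right` above checks. The function name and
# the -100 handling are chosen here for illustration; the tests import the
# real helper from the Blenderbot modeling module.
import numpy as np


def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    """Shift ids one position to the right and prepend the decoder start token."""
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # -100 is a common "ignore" marker in labels; map it back to padding
    return np.where(shifted == -100, pad_token_id, shifted)


# shift_tokens_right_sketch(np.array([[71, 82, 18, 33, 2, 1, 1]]), 1, 2)
# -> array([[ 2, 71, 82, 18, 33,  2,  1]])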
""" MRA model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
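
# Editor's note: a minimal usage sketch (not from the original file). It only
# exercises the configuration container defined above; the argument values
# are arbitrary and assume a transformers install that exposes MraConfig.
#
#     from transformers import MraConfig
#
#     config = MraConfig(num_hidden_layers=6, block_per_row=2)
#     assert config.model_type == "mra"
#     print(config.to_json_string())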
"""Integration tests for the `datasets` inspection utilities."""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
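
# Editor's note: a minimal usage sketch of the helpers exercised above (not
# part of the original test module); real dataset names require network
# access to the Hugging Face Hub.
if __name__ == "__main__":
    configs = get_dataset_config_names("squad")  # e.g. ["plain_text"]
    splits = get_dataset_split_names("squad", configs[0])  # e.g. ["train", "validation"]
    print(configs, splits)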
"""Find an Eulerian path or an Eulerian circuit in an undirected graph."""


# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# check whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
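
# Editor's note: the classification in check_circuit_or_path follows the
# standard degree-parity argument: a connected graph has an Euler circuit iff
# every vertex has even degree (return code 1), an Euler path iff exactly two
# vertices have odd degree (code 2), and neither otherwise (code 3). For the
# sample graphs in main():
#   g1 -> Euler path   (vertices 1 and 5 have odd degree)
#   g2 -> Euler cycle  (all degrees even)
#   g3 -> not Eulerian (four odd-degree vertices)
#   g4 -> Euler cycle  (a triangle)
#   g5 -> Euler cycle, trivially (no edges at all)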
"""Checks that every Transformers `__init__.py` defines the same objects in `_import_structure` and under TYPE_CHECKING."""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every submodule is registered in the main init; raise otherwise."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
"""
Project Euler Problem 28: https://projecteuler.net/problem=28

Sum of the numbers on the diagonals in an n by n spiral grid.
"""
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals in an n by n spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
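
# Editor's note: why `4 * odd**2 - 6 * even` works. Ring i of the spiral has
# side length odd = 2*i + 1, its top-right corner holds odd**2, and the other
# three corners are odd**2 - 2*i, odd**2 - 4*i and odd**2 - 6*i; the four
# corners therefore sum to 4*odd**2 - 12*i = 4*odd**2 - 6*even.
# A brute-force cross-check for small boards (illustrative addition, not part
# of the original solution):
def _diagonal_sum_brute_force(n: int) -> int:
    total, value = 1, 1
    for ring in range(1, (n + 1) // 2):
        step = 2 * ring
        for _ in range(4):  # visit the four corners of this ring in order
            value += step
            total += value
    return total


assert _diagonal_sum_brute_force(5) == solution(5) == 101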
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
"""Open knight's tour: visit every cell of an n x n board exactly once (backtracking)."""
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.

    >>> get_valid_pos((1, 3), 4)
    [(2, 1), (0, 1), (3, 2)]
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check whether the board has been completely filled with non-zero values.

    >>> is_complete([[1]])
    True
    >>> is_complete([[1, 2], [3, 0]])
    False
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a knight's tour on a board of size n; raise ValueError when none exists.

    >>> open_knight_tour(1)
    [[1]]
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
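
# Editor's note (illustrative addition, not part of the original module): a
# small demonstration run of the solver above. A 5x5 board is the smallest
# square board larger than 1x1 that admits an open knight's tour; each cell
# of the returned board holds the 1-based step at which the knight visits it.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)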
"""IndicGLUE benchmark metric (accuracy, F1, or precision@10 depending on the subset)."""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = "\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n"
_DESCRIPTION = "\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    \"accuracy\": Accuracy\n    \"f1\": F1 score\n    \"precision\": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'accuracy': 1.0, 'f1': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
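
# Editor's note: a tiny sanity check for precision_at_10 (illustrative
# addition, not part of the original metric module): identical English/Indic
# sentence vectors must retrieve themselves, giving a perfect score.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    vecs = rng.normal(size=(3, 8)).tolist()
    assert precision_at_10(vecs, vecs) == 1.0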
"""Burrows-Wheeler transform (BWT) and its inverse."""
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """
    :param s: The string that will be rotated len(s) times.
    :return: A list with the rotations.
    :raises TypeError: If s is not an instance of str.

    >>> all_rotations("banana")
    ['banana', 'ananab', 'nanaba', 'anaban', 'nabana', 'abanan']
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """
    :param s: The string that will be used at the bwt algorithm.
    :return: The string composed of the last char of each sorted rotation,
        together with the index of the original string in the sorted list.
    :raises TypeError: If the s parameter type is not str.
    :raises ValueError: If the s parameter is empty.

    >>> bwt_transform("banana")
    {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """
    :param bwt_string: The string returned from the bwt algorithm.
    :param idx_original_string: The index of the original string in the
        sorted rotations list.
    :return: The string used to generate the bwt_string at the bwt algorithm.

    >>> reverse_bwt("nnbaaa", 3)
    'banana'
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
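
# Editor's note: why the inverse works. Repeatedly prepending the BWT column
# and sorting rebuilds, column by column, the full sorted-rotations matrix:
# the BWT column is exactly the character that cyclically precedes each
# sorted rotation, so after k passes each entry holds the first k characters
# of one sorted rotation. After len(bwt_string) passes the matrix of all
# rotations is restored and idx_original_string selects the original string.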
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
"""Once a model has been fine-pruned, the weights masked during the forward pass can be pruned once and for all."""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
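
# Editor's note: a dependency-free sketch (not part of the original script) of
# what the "magnitude" branch above computes; emmental's MagnitudeBinarizer is
# the real implementation. Here `threshold` is assumed to be the fraction of
# weights to keep, in [0, 1].
def magnitude_prune_sketch(tensor, threshold):
    """Zero out all but the `threshold` fraction of largest-|w| entries."""
    k = int(threshold * tensor.numel())
    if k == 0:
        return torch.zeros_like(tensor)
    # cutoff = k-th largest absolute value = (numel - k + 1)-th smallest
    cutoff = tensor.abs().flatten().kthvalue(tensor.numel() - k + 1).values
    return tensor * (tensor.abs() >= cutoff).to(tensor.dtype)


# e.g. magnitude_prune_sketch(torch.randn(4, 4), threshold=0.25) keeps ~4 entries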
"""MobileViT import structure (lazy module)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
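
# Editor's note: the _LazyModule replacement above defers heavy imports until
# an attribute is first accessed. A minimal standalone sketch of the same
# idea using PEP 562 module-level __getattr__ (illustrative, not transformers
# code; names are assumptions):
#
#     # mypackage/__init__.py
#     import importlib
#
#     _lazy = {"MobileViTModel": ".modeling_mobilevit"}
#
#     def __getattr__(name):
#         if name in _lazy:
#             module = importlib.import_module(_lazy[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")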
"""LayoutLMv3 model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
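
# Editor's note: an illustrative export-preparation sketch (not part of the
# original module); the processor checkpoint name and keyword arguments here
# are assumptions.
#
#     from transformers import AutoProcessor, LayoutLMv3Config
#
#     config = LayoutLMv3Config()
#     onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#     processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor)
#     print(sorted(dummy_inputs))  # input_ids, attention_mask, bbox, pixel_values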
"""Doc-builder notebook configuration (Korean docs; comments translated to English)."""
INSTALL_CONTENT = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""Tests for the AutoProcessor auto class."""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_model_config_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCAmelCase__ ):
UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase__ ):
UpperCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ )
UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
UpperCAmelCase_ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
UpperCAmelCase_ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ , use_fast=UpperCAmelCase__ )
UpperCAmelCase_ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
try:
AutoConfig.register('''custom''' , UpperCAmelCase__ )
AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__ )
AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__ )
AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase__ ):
AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = os.path.join(UpperCAmelCase__ , '''vocab.txt''' )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase_ = CustomTokenizer(UpperCAmelCase__ )
UpperCAmelCase_ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCAmelCase__ )
UpperCAmelCase_ = AutoProcessor.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
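# A compact sketch of the registration flow exercised above (the Custom* class
# names are the test's own stand-ins, not a public API surface):
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     AutoProcessor.register(CustomConfig, CustomProcessor)
# After these calls, AutoProcessor.from_pretrained(path) resolves to
# CustomProcessor whenever the saved config deserializes as a CustomConfig.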
def lowerCAmelCase__ ( self : Any ) ->List[str]:
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = False
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = False
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''AutoFeatureExtractor'''
lowerCAmelCase__ = '''AutoTokenizer'''
lowerCAmelCase__ = False
try:
AutoConfig.register('''custom''' , UpperCAmelCase__ )
AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__ )
AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__ )
AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__ )
# If remote code is not set, the default is to use local classes.
UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
UpperCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
UpperCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
UpperCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCAmelCase__ ( cls : Dict ) ->str:
UpperCAmelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Dict ) ->int:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def lowerCAmelCase__ ( self : Any ) ->str:
UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase__ , '''test-processor''' ) , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token )
UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase__ ( self : str ) ->Any:
UpperCAmelCase_ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCAmelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
UpperCAmelCase_ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = os.path.join(UpperCAmelCase__ , '''vocab.txt''' )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase_ = CustomTokenizer(UpperCAmelCase__ )
UpperCAmelCase_ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
UpperCAmelCase_ = Repository(UpperCAmelCase__ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(UpperCAmelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCAmelCase__ , '''tokenizer_config.json''' ) ) as f:
UpperCAmelCase_ = json.load(UpperCAmelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
UpperCAmelCase_ = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=UpperCAmelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 43
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ : Optional[Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
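# For reference, the canonical form of this lazy-import pattern in transformers
# keys the torch-only symbols into the import structure and then swaps out the
# module object itself; the corpus transform above collapsed those assignment
# targets into throwaway names:
#     _import_structure["modeling_encodec"] = ["EncodecModel", ...]
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)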
| 43
| 1
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ : Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowercase__ : Any = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowercase__ : Any = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : List[List[List[str]]] , UpperCAmelCase__ : List[List[str]] , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=UpperCAmelCase__ , hypotheses=UpperCAmelCase__ , min_len=UpperCAmelCase__ , max_len=UpperCAmelCase__ )
}
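# A minimal standalone sketch of what the metric above delegates to. Per the
# description string, GLEU is min(n-gram precision, n-gram recall), and the
# corpus variant aggregates match counts before dividing (toy inputs below):
from nltk.translate import gleu_score

toy_hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
toy_references = [[["the", "cat", "is", "on", "the", "mat"]]]
toy_score = gleu_score.corpus_gleu(
    list_of_references=toy_references, hypotheses=toy_hypotheses, min_len=1, max_len=4
)
print(round(toy_score, 2))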
| 43
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = grid.shape
UpperCAmelCase_ = [-1, 1, 0, 0]
UpperCAmelCase_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set()
UpperCAmelCase_ = np.full((rows, cols) , np.inf )
UpperCAmelCase_ = 0
UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase )
UpperCAmelCase_ = None
while queue:
((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
UpperCAmelCase_ = []
while (x, y) != source:
path.append((x, y) )
UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y]
path.append(_UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCamelCase ) ):
UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
UpperCAmelCase_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCamelCase , (dist + 1, (nx, ny)) )
UpperCAmelCase_ = dist + 1
UpperCAmelCase_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
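# A self-contained restatement of the grid-Dijkstra logic above with readable
# names (the corpus transform collapsed the original parameter names, so this
# sketch illustrates the same algorithm rather than the original API):
from heapq import heappop, heappush
import numpy as np

def grid_shortest_path(grid, source, destination):
    rows, cols = grid.shape
    dist = np.full((rows, cols), np.inf)
    dist[source] = 0
    queue, visited, parent = [(0, source)], set(), {}
    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            # rebuild the path by walking predecessors back to the source
            path = [(x, y)]
            while (x, y) != source:
                x, y = parent[(x, y)]
                path.append((x, y))
            return d, path[::-1]
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx, ny] == 1 and dist[nx, ny] > d + 1:
                dist[nx, ny] = d + 1
                parent[(nx, ny)] = (x, y)
                heappush(queue, (d + 1, (nx, ny)))
    return np.inf, []

demo = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
print(grid_shortest_path(demo, (0, 0), (2, 0)))
# -> (6, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])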
| 43
| 1
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase__ : List[Any] = object()
# For specifying empty leaf dict `{}`
lowercase__ : str = object()
def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(_UpperCamelCase ) - len(_UpperCamelCase ) + 1 ):
UpperCAmelCase_ = [x.match(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , ks[i:] )]
if matches and all(_UpperCamelCase ):
return True
return False
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
def replace(_UpperCamelCase : int , _UpperCamelCase : Optional[int] ):
for rule, replacement in rules:
if _match(_UpperCamelCase , _UpperCamelCase ):
return replacement
return val
return replace
def __lowerCamelCase ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , _UpperCamelCase )),
(("transformer", "wte", "embedding"), P('''mp''' , _UpperCamelCase )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_UpperCamelCase , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , _UpperCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(_UpperCamelCase , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , _UpperCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = _get_partition_rules()
UpperCAmelCase_ = _replacement_rules(_UpperCamelCase )
UpperCAmelCase_ = {k: _unmatched for k in flatten_dict(_UpperCamelCase )}
UpperCAmelCase_ = {k: replace(_UpperCamelCase , _UpperCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(_UpperCamelCase ) )
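# A standalone illustration of the rule-matching idea implemented above: a rule
# is a tuple of regexes that must match a contiguous window of the flattened
# parameter key (the key below is hypothetical; helper names were collapsed by
# the corpus transform, so this restates the logic with readable names):
import re

def match(qs, ks):
    patterns = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        window = [p.match(k) for p, k in zip(patterns, ks[i:])]
        if window and all(window):
            return True
    return False

key = ("transformer", "h", "0", "attn", "out_proj", "kernel")
print(match(("attention", "out_proj", "kernel"), key))  # False: "attn" != "attention"
print(match(("out_proj", "kernel"), key))               # True: matches the tail window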
| 43
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = XLMTokenizer
lowerCAmelCase__ = False
def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
UpperCAmelCase_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any ) ->List[Any]:
UpperCAmelCase_ = '''lower newer'''
UpperCAmelCase_ = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
UpperCAmelCase_ = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase_ = '''lower'''
UpperCAmelCase_ = ['''low''', '''er</w>''']
UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = tokens + ['''<unk>''']
UpperCAmelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Any ) ->str:
UpperCAmelCase_ = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
UpperCAmelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
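# Why "lower" tokenizes to ["low", "er</w>"] with the toy files above: BPE
# greedily applies merges in priority order, so l+o -> "lo", lo+w -> "low",
# and e+r</w> -> "er</w>" ("</w>" marks the end of a word; the trailing
# numbers in the merges lines are counts that the tokenizer discards).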
| 43
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : List[str] = logging.get_logger(__name__)
def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int=False ):
'''simple docstring'''
UpperCAmelCase_ = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ = ''''''
else:
UpperCAmelCase_ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ = in_proj_bias[-config.hidden_size :]
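# A shape-level sketch of the q/k/v split performed above: timm stores the
# attention input projection as one fused (3*hidden, hidden) matrix, and the
# three contiguous row blocks become the separate query/key/value weights
# (toy size for illustration):
import torch

hidden = 4
fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = fused[:hidden, :]
k_w = fused[hidden : hidden * 2, :]
v_w = fused[-hidden:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused)  # the blocks tile the fused matrix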
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Any ):
'''simple docstring'''
UpperCAmelCase_ = dct.pop(_UpperCamelCase )
UpperCAmelCase_ = val
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : int=False ):
'''simple docstring'''
UpperCAmelCase_ = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_UpperCamelCase , )
UpperCAmelCase_ = ViTHybridConfig(backbone_config=_UpperCamelCase , image_size=384 , num_labels=1000 )
UpperCAmelCase_ = False
# load original model from timm
UpperCAmelCase_ = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_UpperCamelCase )
UpperCAmelCase_ = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCAmelCase_ = ViTHybridModel(_UpperCamelCase ).eval()
else:
UpperCAmelCase_ = ViTHybridForImageClassification(_UpperCamelCase ).eval()
model.load_state_dict(_UpperCamelCase )
# create image processor
UpperCAmelCase_ = create_transform(**resolve_data_config({} , model=_UpperCamelCase ) )
UpperCAmelCase_ = transform.transforms
UpperCAmelCase_ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
UpperCAmelCase_ = ViTHybridImageProcessor(
do_resize=_UpperCamelCase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_UpperCamelCase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_UpperCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = transform(_UpperCamelCase ).unsqueeze(0 )
UpperCAmelCase_ = processor(_UpperCamelCase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_UpperCamelCase , _UpperCamelCase )
# verify logits
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCamelCase )
UpperCAmelCase_ = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
UpperCAmelCase_ = timm_model.forward_features(_UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_UpperCamelCase , outputs.pooler_output , atol=1E-3 )
else:
UpperCAmelCase_ = timm_model(_UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCamelCase , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
lowercase__ : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 43
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase_ = False
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase_ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = plt.imshow(_UpperCamelCase )
fig.axes.get_xaxis().set_visible(_UpperCamelCase )
fig.axes.get_yaxis().set_visible(_UpperCamelCase )
plt.show()
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = datetime.now()
UpperCAmelCase_ = current_time.strftime('''%H:%M:%S''' )
return timestamp
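# The first helper above is the standard parameter-freezing pattern; the corpus
# transform dropped the attribute name from the assignment, so for clarity the
# intended line is `param.requires_grad = False`. Minimal demonstration:
import torch

frozen_layer = torch.nn.Linear(2, 2)
for param in frozen_layer.parameters():
    param.requires_grad = False
print(all(not p.requires_grad for p in frozen_layer.parameters()))  # True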
| 43
| 1
|
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
lowercase__ : Dict = 2048
lowercase__ : Dict = 4096
lowercase__ : List[Any] = 42
lowercase__ : List[str] = os.environ.pop("PROCESS_TRAIN", "false")
lowercase__ : Optional[int] = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def __lowerCamelCase ( _UpperCamelCase : Optional[int] ):
'''simple docstring'''
def choose_first(_UpperCamelCase : Any , _UpperCamelCase : Optional[int]=False ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
UpperCAmelCase_ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
UpperCAmelCase_ = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
UpperCAmelCase_ = {'''id''': example['''id''']}
UpperCAmelCase_ = example['''annotations''']
UpperCAmelCase_ = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
UpperCAmelCase_ = ['''yes'''] if 1 in yes_no_answer else ['''no''']
UpperCAmelCase_ = UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = []
UpperCAmelCase_ = ['''<cls>''']
else:
UpperCAmelCase_ = ['''short''']
UpperCAmelCase_ = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
UpperCAmelCase_ = ['''long''']
UpperCAmelCase_ = choose_first(annotation['''long_answer'''] , is_long_answer=_UpperCamelCase )
UpperCAmelCase_ = []
answer.update(_UpperCamelCase )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
UpperCAmelCase_ = True
else:
UpperCAmelCase_ = False
UpperCAmelCase_ = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , _UpperCamelCase ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=False ):
'''simple docstring'''
UpperCAmelCase_ = _get_single_answer(_UpperCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase_ = example['''document''']['''tokens''']
UpperCAmelCase_ = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, this helps remove all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
UpperCAmelCase_ = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
UpperCAmelCase_ = example['''document''']['''tokens''']
UpperCAmelCase_ = answer['''start_token''']
UpperCAmelCase_ = answer['''end_token''']
UpperCAmelCase_ = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
UpperCAmelCase_ = ''' '''.join(context[start_token:end_token] )
# sanity-check the index shifting above
if assertion:
UpperCAmelCase_ = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
UpperCAmelCase_ = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
UpperCAmelCase_ = ''' '''.join([old[i] for i in range(len(_UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , _UpperCamelCase , end='''\n''' )
print('''Old:''' , _UpperCamelCase , end='''\n\n''' )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
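# A tiny worked example of the index shifting above (hypothetical tokens):
# every HTML token that precedes the answer span shifts the start/end indices
# one position left, so they keep pointing at the same words after cleanup.
doc_tokens = ["<p>", "the", "cat", "</p>", "sat"]
is_html = [True, False, False, True, False]
raw_start, raw_end = 4, 5  # "sat" in the raw stream (end exclusive here)
start_token, end_token, context = raw_start, raw_end, []
for i, tok in enumerate(doc_tokens):
    if not is_html[i]:
        context.append(tok)
    else:
        if raw_start > i:
            start_token -= 1
        if raw_end > i:
            end_token -= 1
print(context[start_token:end_token])  # ['sat']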
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Any=2048 , _UpperCamelCase : Dict=4096 , _UpperCamelCase : Union[str, Any]=True ):
'''simple docstring'''
UpperCAmelCase_ = get_context_and_ans(_UpperCamelCase , assertion=_UpperCamelCase )
UpperCAmelCase_ = out['''answer''']
# these samples are removed later
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
UpperCAmelCase_ = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
UpperCAmelCase_ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = input_ids[:q_len]
UpperCAmelCase_ = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
UpperCAmelCase_ = i + max_length - q_len
UpperCAmelCase_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_UpperCamelCase ),
"end_token": [-100] * len(_UpperCamelCase ),
"category": category,
},
}
UpperCAmelCase_ = out['''context'''].split()
UpperCAmelCase_ = splitted_context[answer['''end_token''']]
UpperCAmelCase_ = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=_UpperCamelCase , ).input_ids )
UpperCAmelCase_ = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=_UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# extend end_token to cover all sub-tokens of the last word
UpperCAmelCase_ = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
UpperCAmelCase_ = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
UpperCAmelCase_ = answer['''start_token''']
UpperCAmelCase_ = answer['''end_token''']
if assertion:
UpperCAmelCase_ = tokenizer.decode(_UpperCamelCase )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , _UpperCamelCase , end='''\n\n''' )
if len(_UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
UpperCAmelCase_ = input_ids[:q_len]
UpperCAmelCase_ = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = [] # null, yes, no, long, short
for i in doc_start_indices:
UpperCAmelCase_ = i + max_length - q_len
UpperCAmelCase_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
UpperCAmelCase_ = start_token - i + q_len
UpperCAmelCase_ = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
UpperCAmelCase_ = -100
UpperCAmelCase_ = -100
answers_category.append('''null''' )
UpperCAmelCase_ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_UpperCamelCase )
answers_end_token.append(_UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(_UpperCamelCase ) )
print('''Old:''' , tokenizer.decode(_UpperCamelCase ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
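# Quick arithmetic sketch of the striding above: context windows of
# (max_length - q_len) tokens start every (max_length - doc_stride) positions,
# each prefixed with the q_len question tokens (toy numbers, not the real
# DOC_STRIDE/MAX_LENGTH defaults):
max_length, doc_stride, q_len, total_len = 10, 6, 3, 20
for start in range(q_len, total_len, max_length - doc_stride):
    end = start + max_length - q_len
    print((start, min(end, total_len)))  # (3, 10), (7, 14), (11, 18), (15, 20), (19, 20)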
def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=2048 , _UpperCamelCase : str=4096 , _UpperCamelCase : int=False ):
'''simple docstring'''
UpperCAmelCase_ = get_strided_contexts_and_ans(
_UpperCamelCase , _UpperCamelCase , doc_stride=_UpperCamelCase , max_length=_UpperCamelCase , assertion=_UpperCamelCase , )
return example
def __lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : str ):
'''simple docstring'''
with jsonlines.open(_UpperCamelCase , '''a''' ) as writer:
for example in tqdm(_UpperCamelCase , total=len(_UpperCamelCase ) , desc='''Saving samples ... ''' ):
UpperCAmelCase_ = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
continue # skip samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # randomly drop 60 % of the "null" samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
lowercase__ : List[Any] = load_dataset("natural_questions")
lowercase__ : Any = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
lowercase__ : Dict = data["train" if PROCESS_TRAIN == "true" else "validation"]
lowercase__ : List[Any] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
lowercase__ : List[str] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
lowercase__ : Optional[Any] = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
lowercase__ : Optional[int] = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 43
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : str ) ->List[str]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_euler''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : List[str] ) ->int:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_euler''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
UpperCAmelCase_ = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase_ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
UpperCAmelCase_ = '''A painting of a squirrel eating a burger'''
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=UpperCAmelCase__ , )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 43
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : List[str] = logging.getLogger()
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
UpperCAmelCase_ = parser.parse_args()
return args.f
def __lowerCamelCase ( _UpperCamelCase : Any ):
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''all_results.json''' )
if os.path.exists(_UpperCamelCase ):
with open(_UpperCamelCase , '''r''' ) as f:
UpperCAmelCase_ = json.load(_UpperCamelCase )
else:
raise ValueError(F"""can't find {path}""" )
return results
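# Illustrative shape of the `all_results.json` the helper above parses; the
# exact keys depend on which example script produced it (assumed typical keys):
#     {"eval_accuracy": 0.85, "train_loss": 0.32, "perplexity": 23.7}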
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls : int ) ->List[str]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase_ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->List[Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping: there are not enough batches to train the model, and it would need drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : str ) ->str:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
# with so little data, distributed training needs more epochs to get the score on par with 0/1 GPU
UpperCAmelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Tuple ) ->str:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
UpperCAmelCase_ = logging.StreamHandler(sys.stdout )
logger.addHandler(UpperCAmelCase__ )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowerCAmelCase__ ( self : Dict ) ->Any:
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
UpperCAmelCase_ = get_results(UpperCAmelCase__ )
        # The untrained base model scores about 25%, so training should land well above that
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , '''image_classification_no_trainer''' ) ) )
| 43
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MvpTokenizer
lowerCAmelCase__ = MvpTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = filter_roberta_detectors
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
super().setUp()
UpperCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCAmelCase_ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
UpperCAmelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowerCAmelCase__ ( self : Tuple , **UpperCAmelCase__ : List[str] ) ->Dict:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , **UpperCAmelCase__ : int ) ->Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) ->Union[str, Any]:
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def lowerCAmelCase__ ( self : Any ) ->Dict:
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Test that special tokens are reset
@require_torch
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , UpperCAmelCase__ )
self.assertIn('''attention_mask''' , UpperCAmelCase__ )
self.assertNotIn('''labels''' , UpperCAmelCase__ )
self.assertNotIn('''decoder_attention_mask''' , UpperCAmelCase__ )
@require_torch
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
UpperCAmelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(text_target=UpperCAmelCase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ) ->int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
UpperCAmelCase_ = ['''A long paragraph for summarization.''']
UpperCAmelCase_ = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , text_target=UpperCAmelCase__ , return_tensors='''pt''' )
UpperCAmelCase_ = inputs['''input_ids''']
UpperCAmelCase_ = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
pass
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = '''A, <mask> AllenNLP sentence.'''
UpperCAmelCase_ = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # The Rust tokenizer correctly handles the space before the mask token while the Python one doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 43
| 1
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase__ : Any = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = list(s_dict.keys() )
for key in keys:
UpperCAmelCase_ = R'''.*/layers_(\d+)'''
UpperCAmelCase_ = key
if re.match(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , _UpperCamelCase )
UpperCAmelCase_ = R'''(encoder|decoder)\/'''
if re.match(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ = re.match(_UpperCamelCase , _UpperCamelCase ).groups()
if groups[0] == "encoder":
UpperCAmelCase_ = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , _UpperCamelCase )
UpperCAmelCase_ = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , _UpperCamelCase )
elif groups[0] == "decoder":
UpperCAmelCase_ = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , _UpperCamelCase )
UpperCAmelCase_ = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , _UpperCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase_ = new_key.replace(_UpperCamelCase , _UpperCamelCase )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase_ = s_dict.pop(_UpperCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase_ = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase_ = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase_ = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(_UpperCamelCase ):
                # assumption: each expert's weights live under an experts/expert_{idx}/ key
                s_dict[key.replace('expert/' , f'experts/expert_{idx}/' )] = expert_weights[idx]
                print(F"""{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}""" )
s_dict.pop(_UpperCamelCase )
return s_dict
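# Illustrative trace of the renaming above (assumption: example key added for
# clarity, not part of the original script):
#   "encoder/layers_0/pre_mlp_layer_norm/scale"
#     -> "encoder/block/0/layer/pre_mlp_layer_norm/scale"  (layers_N -> block/N/layer)
#     -> "encoder/block/0/layer/1/layer_norm/scale"        (encoder MLP norm sits in slot 1)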
lowercase__ : Union[str, Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Any ):
'''simple docstring'''
import regex as re
with open(_UpperCamelCase , '''r''' ) as f:
UpperCAmelCase_ = f.read()
UpperCAmelCase_ = re.findall(R'''(.*) = ([0-9.]*)''' , _UpperCamelCase )
UpperCAmelCase_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase_ = float(_UpperCamelCase ) if '''.''' in value else int(_UpperCamelCase )
UpperCAmelCase_ = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , _UpperCamelCase )[0]
UpperCAmelCase_ = str(activation[1] )
UpperCAmelCase_ = num_experts
UpperCAmelCase_ = SwitchTransformersConfig(**_UpperCamelCase )
return config
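# Example gin lines the regexes above expect (assumption: illustrative values,
# not part of the original script):
#   NUM_HEADS = 12                          -> num_heads = 12
#   EMBED_DIM = 768                         -> d_model = 768
#   dense.MlpBlock.activations = ('gelu',)  -> feed_forward_proj = 'gelu'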
def __lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : str="./" , _UpperCamelCase : int=8 ):
'''simple docstring'''
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
    UpperCAmelCase_ = checkpoints.load_t5x_checkpoint(_UpperCamelCase )
if gin_file is not None:
UpperCAmelCase_ = convert_gin_to_config(_UpperCamelCase , _UpperCamelCase )
else:
UpperCAmelCase_ = SwitchTransformersConfig.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ = SwitchTransformersForConditionalGeneration(_UpperCamelCase )
UpperCAmelCase_ = flax_params['''target''']
UpperCAmelCase_ = flatten_dict(_UpperCamelCase , sep='''/''' )
UpperCAmelCase_ = rename_keys(_UpperCamelCase )
UpperCAmelCase_ = unflatten_dict(_UpperCamelCase , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
        help=(
            "Path to the T5X checkpoint of the pre-trained SwitchTransformers model. If no `gin_file` is"
            " provided, a `config_name` has to be passed to build the config."
        ),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase__ : Optional[int] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 43
|
'''simple docstring'''
def __lowerCamelCase ( separator : str , separated : list[str] ):
    '''simple docstring'''
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
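
# Illustrative behaviour of the join helper above (assumption: examples added
# for clarity):
#   __lowerCamelCase("-", ["You", "are", "cool"])  -> "You-are-cool"
#   __lowerCamelCase("", ["a", "b", "c"])          -> "abc"
#   __lowerCamelCase("-", ["apple", 1])            -> raises Exception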
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43
| 1
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase_ = '''xvjiarui/stable-diffusion-2-inpainting'''
UpperCAmelCase_ , UpperCAmelCase_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
UpperCAmelCase_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = 50
UpperCAmelCase_ = jax.device_count()
UpperCAmelCase_ = num_samples * [prompt]
UpperCAmelCase_ = num_samples * [init_image]
UpperCAmelCase_ = num_samples * [mask_image]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = pipeline.prepare_inputs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# shard inputs and rng
UpperCAmelCase_ = replicate(UpperCAmelCase__ )
UpperCAmelCase_ = jax.random.split(UpperCAmelCase__ , jax.device_count() )
UpperCAmelCase_ = shard(UpperCAmelCase__ )
UpperCAmelCase_ = shard(UpperCAmelCase__ )
UpperCAmelCase_ = shard(UpperCAmelCase__ )
UpperCAmelCase_ = pipeline(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ )
UpperCAmelCase_ = output.images.reshape(UpperCAmelCase__ , 512 , 512 , 3 )
UpperCAmelCase_ = images[0, 253:256, 253:256, -1]
UpperCAmelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase_ = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
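        # Note (assumption: explanatory comment added for clarity): with jit=True
        # the pipeline runs under pmap, so replicate() copies the params to every
        # device and shard() reshapes inputs from (num_devices * batch, ...) to
        # (num_devices, batch, ...), giving each device one slice of the batch.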
| 43
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase__ : Optional[int] = logging.get_logger(__name__)
def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ):
'''simple docstring'''
def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : str=None ):
UpperCAmelCase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
UpperCAmelCase_ = math.floor(val / multiple ) * multiple
if x < min_val:
UpperCAmelCase_ = math.ceil(val / multiple ) * multiple
return x
UpperCAmelCase_ = (output_size, output_size) if isinstance(_UpperCamelCase , _UpperCamelCase ) else output_size
UpperCAmelCase_ , UpperCAmelCase_ = get_image_size(_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ = output_size
# determine new height and width
UpperCAmelCase_ = output_height / input_height
UpperCAmelCase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
UpperCAmelCase_ = scale_width
else:
# fit height
UpperCAmelCase_ = scale_height
UpperCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=_UpperCamelCase )
UpperCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=_UpperCamelCase )
return (new_height, new_width)
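# Worked example (assumption: numbers added for illustration): for a 480x640
# (height x width) input and a 384x384 target with keep_aspect_ratio=True and
# multiple=32, scale_height = 0.8 and scale_width = 0.6; 0.8 deviates less
# from 1, so both sides use 0.8, giving 384x512, already multiples of 32.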
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : str , ) ->None:
super().__init__(**UpperCAmelCase__ )
UpperCAmelCase_ = size if size is not None else {'''height''': 384, '''width''': 384}
UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = keep_aspect_ratio
UpperCAmelCase_ = ensure_multiple_of
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray:
UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
UpperCAmelCase_ = get_resize_output_image_size(
UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Any:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image:
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(UpperCAmelCase__ )
UpperCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
UpperCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None ) ->Optional[Any]:
UpperCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCAmelCase__ ):
UpperCAmelCase_ = target_sizes.numpy()
UpperCAmelCase_ = []
for idx in range(len(UpperCAmelCase__ ) ):
UpperCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ )
UpperCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase__ )
else:
UpperCAmelCase_ = logits.argmax(dim=1 )
UpperCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
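    # Shape sketch (assumption: explanatory comment added for clarity): logits of
    # shape (batch, num_labels, h, w) become a list of (target_h, target_w) label
    # maps when target_sizes is given, or a list of (h, w) argmax maps otherwise.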
| 43
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''timm_backbone'''
def __init__( self : Any , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : Optional[Any] , ) ->Dict:
super().__init__(**UpperCAmelCase__ )
UpperCAmelCase_ = backbone
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = features_only
UpperCAmelCase_ = use_pretrained_backbone
UpperCAmelCase_ = True
UpperCAmelCase_ = out_indices if out_indices is not None else (-1,)
| 43
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[str] = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''time_series_transformer'''
lowerCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "student_t" , UpperCAmelCase__ : str = "nll" , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase__ : Optional[Union[str, bool]] = "mean" , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Tuple , ) ->Optional[int]:
# time series specific configuration
UpperCAmelCase_ = prediction_length
UpperCAmelCase_ = context_length or prediction_length
UpperCAmelCase_ = distribution_output
UpperCAmelCase_ = loss
UpperCAmelCase_ = input_size
UpperCAmelCase_ = num_time_features
UpperCAmelCase_ = lags_sequence
UpperCAmelCase_ = scaling
UpperCAmelCase_ = num_dynamic_real_features
UpperCAmelCase_ = num_static_real_features
UpperCAmelCase_ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase_ = cardinality
else:
UpperCAmelCase_ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCAmelCase_ = embedding_dimension
else:
UpperCAmelCase_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase_ = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase_ = input_size * len(UpperCAmelCase__ ) + self._number_of_features
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = use_cache
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def lowerCAmelCase__ ( self : List[str] ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
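    # Worked example (assumption: numbers added for illustration): with the
    # defaults (no static, dynamic or time features; cardinality [0], so the
    # embedding dimensions sum to 0; input_size=1) the property returns
    # 0 + 0 + 0 + 0 + 1 * 2 = 2, and the encoder feature size becomes
    # input_size * len(lags_sequence) + 2 = 7 + 2 = 9.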
| 43
| 1
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def __lowerCamelCase ( _UpperCamelCase : SplitDict ):
'''simple docstring'''
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
UpperCAmelCase_ = None
# the split name of split_dict takes over the name of the split info object
UpperCAmelCase_ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=_UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] )
def __lowerCamelCase ( _UpperCamelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 43
|
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[Any] = "T5Config"
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''mt5'''
lowerCAmelCase__ = MTaConfig
| 43
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''openai-gpt'''
lowerCAmelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str]=4_0478 , UpperCAmelCase__ : List[Any]=512 , UpperCAmelCase__ : Dict=768 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : List[Any]=1e-5 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : Any="cls_index" , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Tuple=0.1 , **UpperCAmelCase__ : List[Any] , ) ->Optional[int]:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = n_positions
UpperCAmelCase_ = n_embd
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = afn
UpperCAmelCase_ = resid_pdrop
UpperCAmelCase_ = embd_pdrop
UpperCAmelCase_ = attn_pdrop
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = summary_type
UpperCAmelCase_ = summary_use_proj
UpperCAmelCase_ = summary_activation
UpperCAmelCase_ = summary_first_dropout
UpperCAmelCase_ = summary_proj_to_labels
super().__init__(**UpperCAmelCase__ )
| 43
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowercase__ : str = datasets.logging.get_logger(__name__)
lowercase__ : Dict = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowercase__ : str = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowercase__ : str = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
            '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Union[str, Any] ) ->Any:
if self.config_name == "default":
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
else:
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=False ) ->Optional[Any]:
if gpus is None:
UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCAmelCase_ = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for t in zip(*data.values() )]
UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(UpperCAmelCase__ , gpus=UpperCAmelCase__ , progress_bar=UpperCAmelCase__ )
return {"mean_score": mean_score, "scores": scores}
| 43
| 1
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowercase__ : str = Lock()
def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_UpperCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ = min(_UpperCamelCase , _UpperCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_UpperCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ = max(_UpperCamelCase , _UpperCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=_UpperCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
for i in range(1 , len(_UpperCamelCase ) - 1 ):
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=_UpperCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
process_array_.append(
Process(
target=_UpperCamelCase , args=(
len(_UpperCamelCase ) - 1,
arr[len(_UpperCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_UpperCamelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_UpperCamelCase ) ):
UpperCAmelCase_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
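
# A minimal single-process sketch of the same algorithm (assumption: added for
# illustration, not part of the original module): on even phases compare-and-swap
# adjacent pairs starting at index 0, on odd phases pairs starting at index 1;
# after len(arr) phases the list is sorted.
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
# Example: odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3]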
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_UpperCamelCase )
UpperCAmelCase_ = odd_even_transposition(_UpperCamelCase )
print('''Sorted List\n''' )
print(*_UpperCamelCase )
if __name__ == "__main__":
main()
| 43
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , UpperCAmelCase__ : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[str, Any]=7 , UpperCAmelCase__ : Optional[int]=30 , UpperCAmelCase__ : Union[str, Any]=400 , UpperCAmelCase__ : List[Any]=3 , ) ->Dict:
UpperCAmelCase_ = parent
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 288}
UpperCAmelCase_ = size_divisor
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=False ) ->Any:
if not batched:
UpperCAmelCase_ = self.size['''shortest_edge''']
UpperCAmelCase_ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
UpperCAmelCase_ = size / min(UpperCAmelCase__ , UpperCAmelCase__ )
if h < w:
UpperCAmelCase_ , UpperCAmelCase_ = size, scale * w
else:
UpperCAmelCase_ , UpperCAmelCase_ = scale * h, size
UpperCAmelCase_ = int((1333 / 800) * size )
if max(UpperCAmelCase__ , UpperCAmelCase__ ) > max_size:
UpperCAmelCase_ = max_size / max(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = newh * scale
UpperCAmelCase_ = neww * scale
UpperCAmelCase_ , UpperCAmelCase_ = int(newh + 0.5 ), int(neww + 0.5 )
UpperCAmelCase_ , UpperCAmelCase_ = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[0] )[0]
UpperCAmelCase_ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : item[1] )[1]
return expected_height, expected_width
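    # Worked example (assumption: numbers added for illustration): a 300x400 image
    # with shortest_edge=288 scales by 288/300 = 0.96 to 288x384; the cap
    # int(1333 / 800 * 288) = 479 is not exceeded, and both sides are already
    # multiples of size_divisor=32, so the expected size is (288, 384).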
@require_torch
@require_vision
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
UpperCAmelCase_ = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , '''size_divisor''' ) )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
pass
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : int ) ->List[str]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 43
| 1
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = 5
# Realm tok
UpperCAmelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
UpperCAmelCase_ = os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->RealmTokenizer:
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ) ->Any:
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
UpperCAmelCase_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def lowerCAmelCase__ ( self : str ) ->Any:
UpperCAmelCase_ = np.array(
[
B'''This is the first record''',
B'''This is the second record''',
B'''This is the third record''',
B'''This is the fourth record''',
B'''This is the fifth record''',
B'''This is a longer longer longer record''',
] , dtype=UpperCAmelCase__ , )
return block_records
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
UpperCAmelCase_ = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3] , dtype='''long''' )
UpperCAmelCase_ = tokenizer(['''Test question'''] ).input_ids
UpperCAmelCase_ = tokenizer(
['''the fourth'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
UpperCAmelCase_ = self.get_config()
UpperCAmelCase_ = self.get_dummy_retriever()
UpperCAmelCase_ = retriever.tokenizer
UpperCAmelCase_ = np.array([0, 3, 5] , dtype='''long''' )
UpperCAmelCase_ = tokenizer(['''Test question'''] ).input_ids
UpperCAmelCase_ = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
UpperCAmelCase_ = config.reader_seq_len
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever(
UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''np''' )
self.assertEqual([False, True, True] , UpperCAmelCase__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
UpperCAmelCase_ = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCAmelCase_ = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase_ = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , B'''This is the first record''' )
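# The retrieval step exercised above, in miniature (hedged NumPy sketch, not the
# RealmRetriever implementation itself): block_records is a flat array of byte
# strings, so fetching the evidence blocks for the top-scoring ids is a single
# gather, after which each block is concatenated with the question for the reader.
import numpy as np

block_records = np.array([b"first block", b"second block", b"third block"], dtype=object)
retrieved_block_ids = np.array([2, 0])  # ids produced by the retriever's scorer
retrieved_blocks = np.take(block_records, indices=retrieved_block_ids, axis=0)
assert list(retrieved_blocks) == [b"third block", b"first block"]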
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
UpperCAmelCase_ = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
UpperCAmelCase_ = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non-filtered indices as noted above
UpperCAmelCase_ = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non-filtered values as noted above
UpperCAmelCase_ = tf_top_k_top_p_filtering(UpperCAmelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
UpperCAmelCase_ = output[output != -float('''inf''' )]
UpperCAmelCase_ = tf.cast(
tf.where(tf.not_equal(UpperCAmelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-12 )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
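# The rule tf_top_k_top_p_filtering enforces, in miniature (hedged NumPy sketch,
# not the TF implementation): keep the top_k largest logits, then keep the
# smallest prefix of the probability-sorted distribution whose cumulative mass
# reaches top_p (never fewer than min_tokens_to_keep), and set everything else
# to -inf.
import numpy as np

def top_k_top_p_sketch(logits, top_k, top_p, min_tokens_to_keep=1):
    logits = logits.astype(np.float64)
    if top_k > 0:
        kth_largest = np.sort(logits)[-max(top_k, min_tokens_to_keep)]
        logits[logits < kth_largest] = -np.inf  # top-k mask
    order = np.argsort(logits)[::-1]  # indices, descending by logit
    probs = np.exp(logits[order] - logits[order[0]])
    probs /= probs.sum()  # softmax over the surviving logits
    keep = (np.cumsum(probs) - probs) <= top_p  # mass strictly before this token
    keep[:min_tokens_to_keep] = True
    filtered = np.full_like(logits, -np.inf)
    filtered[order[keep]] = logits[order[keep]]
    return filtered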
@require_tf
class lowerCamelCase ( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
if is_tf_available():
lowerCAmelCase__ = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
# TF-only test: tf.saved_model export
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = 2
UpperCAmelCase_ = 2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : List[str] ) ->Dict:
super(UpperCAmelCase__ , self ).__init__()
UpperCAmelCase_ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->int:
UpperCAmelCase_ = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ = [[2, 0], [102, 103]]
UpperCAmelCase_ = [[1, 0], [1, 1]]
UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
for batch_size in range(1 , len(UpperCAmelCase__ ) + 1 ):
UpperCAmelCase_ = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
# TF-only test: tf.saved_model export
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = 1
UpperCAmelCase_ = 2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase__ : int ) ->List[str]:
super(UpperCAmelCase__ , self ).__init__()
UpperCAmelCase_ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) ->int:
UpperCAmelCase_ = self.model.generate(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , )
return {"sequences": outputs["sequences"]}
UpperCAmelCase_ = [[2], [102, 103]]
UpperCAmelCase_ = [[1], [1, 1]]
UpperCAmelCase_ = DummyModel(model=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase__ , UpperCAmelCase__ , signatures={'''serving_default''': dummy_model.serving} )
UpperCAmelCase_ = tf.saved_model.load(UpperCAmelCase__ ).signatures['''serving_default''']
for input_row in range(len(UpperCAmelCase__ ) ):
UpperCAmelCase_ = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
UpperCAmelCase_ = serving_func(**UpperCAmelCase__ )['''sequences''']
UpperCAmelCase_ = test_model.generate(**UpperCAmelCase__ , max_new_tokens=UpperCAmelCase__ )
tf.debugging.assert_equal(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
@require_tensorflow_text
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCAmelCase__ )
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ) ->Any:
super().__init__()
UpperCAmelCase_ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase__ , '''spiece.model''' ) , '''rb''' ).read() )
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowerCAmelCase__ ( self : Any , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : str ) ->List[str]:
UpperCAmelCase_ = self.tokenizer.tokenize(UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = text.pad_model_inputs(
UpperCAmelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
UpperCAmelCase_ = self.model.generate(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
return self.tokenizer.detokenize(UpperCAmelCase__ )
UpperCAmelCase_ = CompleteSentenceTransformer()
UpperCAmelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
UpperCAmelCase_ = complete_model(UpperCAmelCase__ )
UpperCAmelCase_ = tf.keras.Model(UpperCAmelCase__ , UpperCAmelCase__ )
keras_model.save(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
# Has PT equivalent: this test relies on random sampling
UpperCAmelCase_ = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
UpperCAmelCase_ = 14
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = '''Hello, my dog is cute and'''
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' )
UpperCAmelCase_ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
UpperCAmelCase_ = [638, 198]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
# Has PT equivalent: ample use of framework-specific code
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ = '''Hugging Face is a technology company based in New York and Paris.'''
UpperCAmelCase_ = bart_tokenizer(UpperCAmelCase__ , return_tensors='''tf''' ).input_ids
UpperCAmelCase_ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : int ) ->List[str]:
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
class lowerCamelCase ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict ) ->Any:
return super().call(UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared )
UpperCAmelCase_ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
UpperCAmelCase_ = bart_model.generate(UpperCAmelCase__ ).numpy()
with self.assertRaises(UpperCAmelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase__ , foo='''bar''' )
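# Why the two saved_model export tests above differ, in miniature (hedged TF
# sketch; requires tensorflow): the TensorSpec in the tf.function signature
# pins which dimension may vary after export -- (None, length) admits any batch
# size at fixed length, while (batch, None) admits any sequence length at a
# fixed batch size.
import tensorflow as tf

@tf.function(input_signature=(tf.TensorSpec((None, 2), tf.int32, name="input_ids"),))
def variable_batch(input_ids):
    return tf.shape(input_ids)[0]  # batch dimension is unconstrained

@tf.function(input_signature=(tf.TensorSpec((1, None), tf.int32, name="input_ids"),))
def variable_length(input_ids):
    return tf.shape(input_ids)[1]  # length dimension is unconstrained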
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCAmelCase__ )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase_ = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ = TextStreamer(UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->int:
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCAmelCase__ )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase_ = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
UpperCAmelCase_ = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ = TextIteratorStreamer(UpperCAmelCase__ )
UpperCAmelCase_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
UpperCAmelCase_ = Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
UpperCAmelCase_ = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCAmelCase__ )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase_ = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
UpperCAmelCase_ = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ = TextStreamer(UpperCAmelCase__ , skip_prompt=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''distilgpt2''' )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(UpperCAmelCase__ )
UpperCAmelCase_ = -1
UpperCAmelCase_ = torch.ones((1, 5) , device=UpperCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ = TextStreamer(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=1 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCAmelCase__ ( self : List[str] ) ->str:
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(UpperCAmelCase__ )
UpperCAmelCase_ = -1
UpperCAmelCase_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
UpperCAmelCase_ = TextIteratorStreamer(UpperCAmelCase__ , timeout=0.001 )
UpperCAmelCase_ = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
UpperCAmelCase_ = Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase__ ):
UpperCAmelCase_ = ''''''
for new_text in streamer:
streamer_text += new_text
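# How TextIteratorStreamer behaves, in miniature (hedged stdlib sketch, no
# transformers needed): generate() runs in a producer thread and puts decoded
# text chunks on a queue; iterating the streamer is a blocking get(timeout=...),
# so -- exactly as the last test expects -- a timeout shorter than the producer's
# pace raises Empty.
from queue import Empty, Queue
from threading import Thread
import time

chunk_queue = Queue()

def slow_producer():
    for chunk in ("Hello", ", ", "world"):
        time.sleep(0.01)  # slower than the consumer's timeout below
        chunk_queue.put(chunk)

Thread(target=slow_producer).start()
try:
    while True:
        print(chunk_queue.get(timeout=0.001), end="")
except Empty:
    pass  # mirrors the assertRaises in the timeout test above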
'''simple docstring'''
from collections.abc import Callable
def __lowerCamelCase ( _UpperCamelCase : Callable[[float], float] , _UpperCamelCase : float , _UpperCamelCase : float ):
'''simple docstring'''
UpperCAmelCase_ = a
UpperCAmelCase_ = b
    if function(_UpperCamelCase ) == 0: # either a or b is already a root of the function
return a
elif function(_UpperCamelCase ) == 0:
return b
elif (
function(_UpperCamelCase ) * function(_UpperCamelCase ) > 0
    ): # if neither endpoint is a root and f(a) and f(b) have the same sign,
        # then bisection cannot guarantee a root inside the interval
raise ValueError('''could not find root in given interval.''' )
else:
UpperCAmelCase_ = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(_UpperCamelCase ) == 0:
return mid
elif function(_UpperCamelCase ) * function(_UpperCamelCase ) < 0:
UpperCAmelCase_ = mid
else:
UpperCAmelCase_ = mid
UpperCAmelCase_ = start + (end - start) / 2.0
return mid
def __lowerCamelCase ( _UpperCamelCase : float ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
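# A quick numeric check of the routine above (hedged sketch with descriptive
# names restored for readability): f(x) = x**3 - 2*x - 5 satisfies f(2) = -1 and
# f(3) = 16, so bisection on [2, 3] must converge to the classic test root
# x ~= 2.0945515.
def _bisect(f, lo, hi, tol=1e-7):
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if f(lo) * f(mid) > 0:  # same sign on the left half -> root lies on the right
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2.0

assert abs(_bisect(lambda x: x**3 - 2 * x - 5, 2.0, 3.0) - 2.0945515) < 1e-6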
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase__ : List[Any] = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCAmelCase__ = field(
default=lowerCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCAmelCase__ = field(
default=lowerCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCAmelCase__ = field(
default=lowerCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
lowerCAmelCase__ = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
lowerCAmelCase__ = field(
default=10_24 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCAmelCase__ = field(
default=1_42 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
lowerCAmelCase__ = field(
default=1_42 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCAmelCase__ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
lowerCAmelCase__ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
lowerCAmelCase__ = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Source language id for translation.'''} )
lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''Target language id for translation.'''} )
lowerCAmelCase__ = field(default=lowerCamelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
lowerCAmelCase__ = field(
default=lowerCamelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Any ):
'''simple docstring'''
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , F"""{split}_results.json""" ) )
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
assert hasattr(_UpperCamelCase , _UpperCamelCase ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=_UpperCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase_ = SeqaSeqDataset
# Get datasets
UpperCAmelCase_ = (
dataset_class(
_UpperCamelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
UpperCAmelCase_ = (
dataset_class(
_UpperCamelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase_ = (
dataset_class(
_UpperCamelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase_ = (
build_compute_metrics_fn(data_args.task , _UpperCamelCase ) if training_args.predict_with_generate else None
)
UpperCAmelCase_ = SeqaSeqTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , data_args=_UpperCamelCase , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , data_collator=SeqaSeqDataCollator(
_UpperCamelCase , _UpperCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
UpperCAmelCase_ = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
UpperCAmelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase_ = train_result.metrics
UpperCAmelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ = trainer.evaluate(metric_key_prefix='''val''' )
UpperCAmelCase_ = data_args.n_val
UpperCAmelCase_ = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCAmelCase_ = trainer.predict(test_dataset=_UpperCamelCase , metric_key_prefix='''test''' )
UpperCAmelCase_ = test_output.metrics
UpperCAmelCase_ = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase_ = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.predict_with_generate:
UpperCAmelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
UpperCAmelCase_ = lmap(str.strip , _UpperCamelCase )
write_txt_file(_UpperCamelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(_UpperCamelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
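# The argument-dispatch idiom at the top of main(), in miniature (hedged sketch
# using plain json instead of HfArgumentParser): a single ".json" positional is
# treated as a complete run configuration, anything else as ordinary CLI flags.
import json

def parse_args_sketch(argv):
    if len(argv) == 2 and argv[1].endswith(".json"):
        with open(argv[1]) as f:
            return json.load(f)  # whole run configured from one JSON file
    return {"cli_args": argv[1:]}  # fall back to normal flag parsing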
'''simple docstring'''
import re
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
    return [char.split() for char in re.split(R'''[^ a-z A-Z 0-9 \s]''' , _UpperCamelCase )]
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
    UpperCAmelCase_ = split_input(_UpperCamelCase )
return "".join(
[''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool , _UpperCamelCase : str ):
'''simple docstring'''
try:
UpperCAmelCase_ = split_input(_UpperCamelCase )
if upper:
UpperCAmelCase_ = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCAmelCase_ = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
return to_simple_case(_UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
try:
UpperCAmelCase_ = to_simple_case(_UpperCamelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''_''' )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : bool ):
'''simple docstring'''
return to_complex_case(_UpperCamelCase , _UpperCamelCase , '''-''' )
if __name__ == "__main__":
__import__("doctest").testmod()
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
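# The optional-dependency guard above, in miniature (hedged sketch of the
# general pattern, not the diffusers utilities): probe optional backends at
# import time and, when one is missing, expose a stand-in that fails with a
# helpful error only when actually used, instead of breaking the import.
import importlib.util

def _is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

if not (_is_available("torch") and _is_available("transformers")):
    class SpectrogramDiffusionPipelineStub:  # stands in for the dummy objects
        def __init__(self, *args, **kwargs):
            raise ImportError("SpectrogramDiffusionPipeline requires torch and transformers")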
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Optional[int]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = initializer_range
def lowerCAmelCase__ ( self : int ) ->Any:
UpperCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , )
UpperCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Tuple:
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase_ , UpperCAmelCase_ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]:
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )
UpperCAmelCase_ , UpperCAmelCase_ = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
UpperCAmelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = 99
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
UpperCAmelCase_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase_ = input_ids.shape[0]
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase__ ( self : Any ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data()
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ )
UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
UpperCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
UpperCAmelCase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
UpperCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase ( lowerCamelCase , unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
UpperCAmelCase_ = FlaxBlenderbotModelTester(self )
def lowerCAmelCase__ ( self : str ) ->Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self : str ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self : int ) ->int:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ = model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ )
UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase_ = ['''Sam''']
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ )
assert generated_txt[0].strip() == tgt_text
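# shift_tokens_right in miniature (hedged NumPy sketch of the behaviour the
# shift test above checks): roll the labels one position to the right, write
# decoder_start_token_id into column 0, and replace any -100 placeholders with
# pad_token_id.
import numpy as np

def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.roll(input_ids, 1, axis=-1)
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

example = np.array([[71, 82, 18, 2]])
assert shift_tokens_right_sketch(example, 1, 2).tolist() == [[2, 71, 82, 18]]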
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Any = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : str = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
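# The _LazyModule trick in miniature (hedged sketch): PEP 562 module-level
# __getattr__ lets a package publish the names in its import structure without
# importing torch/TF until an attribute is actually touched. The mapping below
# is illustrative.
import importlib

_lazy_map = {"ConvBertModel": ".modeling_convbert"}  # attribute -> submodule

def __getattr__(name):  # only consulted when normal attribute lookup fails
    if name in _lazy_map:
        module = importlib.import_module(_lazy_map[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")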
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase__ : Tuple = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ):
'''simple docstring'''
inspect_dataset(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(_UpperCamelCase )
assert "__pycache__" not in os.listdir(_UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
inspect_metric(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(_UpperCamelCase )
assert "__pycache__" not in os.listdir(_UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with pytest.raises(_UpperCamelCase ):
get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_config_names(_UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase )
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ):
'''simple docstring'''
with pytest.raises(_UpperCamelCase ):
get_dataset_split_names(_UpperCamelCase , config_name=_UpperCamelCase )
| 43
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = '''upernet'''
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[str]=512 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Any=[1, 2, 3, 6] , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]=0.4 , UpperCAmelCase__ : List[Any]=384 , UpperCAmelCase__ : Optional[Any]=256 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : List[str]=255 , **UpperCAmelCase__ : Optional[Any] , ) ->Optional[int]:
super().__init__(**UpperCAmelCase__ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
UpperCAmelCase_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase_ = backbone_config.get('''model_type''' )
UpperCAmelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ = config_class.from_dict(UpperCAmelCase__ )
UpperCAmelCase_ = backbone_config
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = pool_scales
UpperCAmelCase_ = use_auxiliary_head
UpperCAmelCase_ = auxiliary_loss_weight
UpperCAmelCase_ = auxiliary_in_channels
UpperCAmelCase_ = auxiliary_channels
UpperCAmelCase_ = auxiliary_num_convs
UpperCAmelCase_ = auxiliary_concat_input
UpperCAmelCase_ = loss_ignore_index
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
UpperCAmelCase_ = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ = self.backbone_config.to_dict()
UpperCAmelCase_ = self.__class__.model_type
return output
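# Usage sketch (illustrative, not from the original file): constructing this config
# with backbone_config=None falls back to the default ResNet backbone restricted to
# out_features stage1..stage4, and to_dict() re-serializes the nested backbone
# config so the object round-trips through JSON.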
| 43
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowercase__ : List[Any] = "src/transformers"
# Matches is_xxx_available()
lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase__ : int = re.compile(R"^\s*try:")
# Catches a line with else:
lowercase__ : Any = re.compile(R"^\s*else:")
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
if _re_test_backend.search(_UpperCamelCase ) is None:
return None
UpperCAmelCase_ = [b[0] for b in _re_backend.findall(_UpperCamelCase )]
backends.sort()
return "_and_".join(_UpperCamelCase )
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = 0
while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCAmelCase_ = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
UpperCAmelCase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_UpperCamelCase ):
UpperCAmelCase_ = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0]
UpperCAmelCase_ = re.findall(R'''\[([^\]]+)\]''' , _UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
UpperCAmelCase_ = _re_import_struct_key_value.search(_UpperCamelCase )
if single_line_import_search is not None:
UpperCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
UpperCAmelCase_ = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
UpperCAmelCase_ = lines[line_index]
if _re_import_struct_add_one.search(_UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None:
UpperCAmelCase_ = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(''', ''' )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif _re_between_brackets.search(_UpperCamelCase ) is not None:
UpperCAmelCase_ = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(''', ''' )
UpperCAmelCase_ = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
objects.extend(_UpperCamelCase )
elif _re_quote_object.search(_UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCAmelCase_ = []
while (
line_index < len(_UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCAmelCase_ = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
UpperCAmelCase_ = lines[line_index]
UpperCAmelCase_ = _re_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCAmelCase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
def find_duplicates(_UpperCamelCase : Tuple ):
return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCAmelCase_ = []
for key in import_dict_objects.keys():
UpperCAmelCase_ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
UpperCAmelCase_ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCAmelCase_ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
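# Illustrative: an object listed under _import_structure["models.foo"] but absent from
# the TYPE_CHECKING branch is reported as "... in _import_structure but not in TYPE_HINT."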
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = []
for root, _, files in os.walk(_UpperCamelCase ):
if "__init__.py" in files:
UpperCAmelCase_ = os.path.join(_UpperCamelCase , '''__init__.py''' )
UpperCAmelCase_ = parse_init(_UpperCamelCase )
if objects is not None:
UpperCAmelCase_ = analyze_results(*_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
UpperCAmelCase_ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
raise ValueError('''\n\n'''.join(_UpperCamelCase ) )
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = []
for path, directories, files in os.walk(_UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0:
continue
UpperCAmelCase_ = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) )
UpperCAmelCase_ = short_path.replace(os.path.sep , '''.''' )
submodules.append(_UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
UpperCAmelCase_ = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) )
UpperCAmelCase_ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_UpperCamelCase )
return submodules
lowercase__ : Union[str, Any] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def __lowerCamelCase ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
UpperCAmelCase_ = direct_transformers_import(_UpperCamelCase )
UpperCAmelCase_ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we parse the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(_UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f:
UpperCAmelCase_ = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , _UpperCamelCase ) ) )
UpperCAmelCase_ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_UpperCamelCase ) > 0:
UpperCAmelCase_ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 43
| 1
|
'''simple docstring'''
import numpy as np
def __lowerCamelCase ( _UpperCamelCase : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
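# Illustrative values: the sigmoid squashes any real input into (0, 1), e.g.
# sigmoid(np.array([-1.0, 0.0, 1.0])) ~= array([0.2689, 0.5, 0.7311]).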
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = position
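    # A knight moves in an L-shape: the eight candidate offsets from (y, x) below.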
UpperCAmelCase_ = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase_ = []
for position in positions:
UpperCAmelCase_ , UpperCAmelCase_ = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_UpperCamelCase )
return permissible_positions
def __lowerCamelCase ( _UpperCamelCase : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def __lowerCamelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : int ):
'''simple docstring'''
if is_complete(_UpperCamelCase ):
return True
for position in get_valid_pos(_UpperCamelCase , len(_UpperCamelCase ) ):
UpperCAmelCase_ , UpperCAmelCase_ = position
if board[y][x] == 0:
UpperCAmelCase_ = curr + 1
if open_knight_tour_helper(_UpperCamelCase , _UpperCamelCase , curr + 1 ):
return True
UpperCAmelCase_ = 0
return False
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
UpperCAmelCase_ = 1
if open_knight_tour_helper(_UpperCamelCase , (i, j) , 1 ):
return board
UpperCAmelCase_ = 0
    UpperCAmelCase_ = F"""Open Knight Tour cannot be performed on a board of size {n}"""
raise ValueError(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
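# Usage sketch (illustrative): open_knight_tour(1) returns [[1]], while
# open_knight_tour(4) raises ValueError -- no open knight's tour exists on a 4x4 board.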
| 43
| 1
|
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
def lowerCAmelCase__ ( self : List[Any] ) ->int:
UpperCAmelCase_ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
UpperCAmelCase_ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
UpperCAmelCase_ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
def lowerCAmelCase__ ( self : Any ) ->List[str]:
UpperCAmelCase_ , UpperCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
UpperCAmelCase_ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
@property
def lowerCAmelCase__ ( self : Dict ) ->int:
return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
UpperCAmelCase_ , UpperCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
UpperCAmelCase_ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
@property
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
return super().get_dummy_input(include_skip_sample=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
UpperCAmelCase_ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
return super().get_dummy_input(include_skip_sample=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
UpperCAmelCase_ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
return super().get_dummy_input(include_temb=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
UpperCAmelCase_ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
UpperCAmelCase_ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = '''down'''
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
return super().get_dummy_input(include_temb=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
UpperCAmelCase_ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
UpperCAmelCase_ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = '''mid'''
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
UpperCAmelCase_ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
UpperCAmelCase_ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = '''mid'''
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
UpperCAmelCase_ , UpperCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
UpperCAmelCase_ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = '''mid'''
@property
def lowerCAmelCase__ ( self : List[str] ) ->str:
return super().get_dummy_input(include_encoder_hidden_states=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : List[str] ) ->int:
UpperCAmelCase_ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str ) ->Dict:
UpperCAmelCase_ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
UpperCAmelCase_ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
UpperCAmelCase_ , UpperCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
UpperCAmelCase_ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : int ) ->Any:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ , include_encoder_hidden_states=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ = 32
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
UpperCAmelCase_ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : List[str] ) ->int:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
UpperCAmelCase_ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
UpperCAmelCase_ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
return super().get_dummy_input(include_res_hidden_states_tuple=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
UpperCAmelCase_ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
return super().get_dummy_input(include_temb=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Dict:
UpperCAmelCase_ = {'''in_channels''': 32, '''out_channels''': 32}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Any ) ->str:
UpperCAmelCase_ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(UpperCAmelCase__ )
class lowerCamelCase ( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = '''up'''
@property
def lowerCAmelCase__ ( self : str ) ->str:
return super().get_dummy_input(include_temb=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
UpperCAmelCase_ = {'''in_channels''': 32, '''out_channels''': 32}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Tuple:
UpperCAmelCase_ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(UpperCAmelCase__ )
| 43
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError('''The parameter s type must be str.''' )
return [s[i:] + s[:i] for i in range(len(_UpperCamelCase ) )]
def __lowerCamelCase ( _UpperCamelCase : str ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError('''The parameter s type must be str.''' )
if not s:
raise ValueError('''The parameter s must not be empty.''' )
UpperCAmelCase_ = all_rotations(_UpperCamelCase )
    rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
UpperCAmelCase_ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(_UpperCamelCase ),
}
return response
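# Worked example (illustrative): bwt_transform('banana') yields
# {'bwt_string': 'nnbaaa', 'idx_original_string': 3}.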
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError('''The parameter bwt_string type must be str.''' )
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''' )
try:
UpperCAmelCase_ = int(_UpperCamelCase )
except ValueError:
raise TypeError(
            '''The parameter idx_original_string type must be int or'''
            ''' castable to int.''' )
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''' )
if idx_original_string >= len(_UpperCamelCase ):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' )
UpperCAmelCase_ = [''''''] * len(_UpperCamelCase )
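    # Rebuild the sorted rotation table: prepending the BWT column and re-sorting
    # len(bwt_string) times reconstructs every rotation; the row at
    # idx_original_string is then the original string.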
for _ in range(len(_UpperCamelCase ) ):
for i in range(len(_UpperCamelCase ) ):
UpperCAmelCase_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowercase__ : Optional[int] = "Provide a string that I will generate its BWT transform: "
lowercase__ : List[Any] = input(entry_msg).strip()
lowercase__ : Any = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
lowercase__ : Dict = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
| 43
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ['''input_features''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase__ : List[Any]=80 , UpperCAmelCase__ : List[str]=1_6000 , UpperCAmelCase__ : Dict=80 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Union[str, Any] , ) ->Optional[int]:
super().__init__(feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = num_mel_bins
UpperCAmelCase_ = do_ceptral_normalize
UpperCAmelCase_ = normalize_means
UpperCAmelCase_ = normalize_vars
UpperCAmelCase_ = True
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : np.ndarray , ) ->np.ndarray:
UpperCAmelCase_ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
UpperCAmelCase_ = torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 )
UpperCAmelCase_ = ta_kaldi.fbank(UpperCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase__ ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[bool] = True , UpperCAmelCase__ : Optional[bool] = True , UpperCAmelCase__ : float = 0.0 , ) ->np.ndarray:
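        # Utterance-level CMVN: zero-mean / unit-variance per feature dimension,
        # computed over the first `input_length` (unpadded) frames; padded frames
        # are then reset to `padding_value`.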
# make sure we normalize float32 arrays
if normalize_means:
UpperCAmelCase_ = x[:input_length].mean(axis=0 )
UpperCAmelCase_ = np.subtract(UpperCAmelCase__ , UpperCAmelCase__ )
if normalize_vars:
UpperCAmelCase_ = x[:input_length].std(axis=0 )
UpperCAmelCase_ = np.divide(UpperCAmelCase__ , UpperCAmelCase__ )
if input_length < x.shape[0]:
UpperCAmelCase_ = padding_value
# make sure array is in float32
UpperCAmelCase_ = x.astype(np.floataa )
return x
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : List[np.ndarray] , UpperCAmelCase__ : Optional[np.ndarray] = None ) ->List[np.ndarray]:
UpperCAmelCase_ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase__ , UpperCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase__ , UpperCAmelCase__ )
]
def __call__( self : Optional[int] , UpperCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCAmelCase_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
UpperCAmelCase_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
UpperCAmelCase_ = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase_ = [raw_speech]
# extract fbank features
UpperCAmelCase_ = [self._extract_fbank_features(UpperCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
UpperCAmelCase_ = BatchFeature({'''input_features''': features} )
UpperCAmelCase_ = self.pad(
UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
# make sure list is in array format
UpperCAmelCase_ = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCAmelCase__ ):
UpperCAmelCase_ = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features]
UpperCAmelCase_ = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
UpperCAmelCase_ = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
UpperCAmelCase_ = (
np.array(UpperCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCAmelCase_ = self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase__ )
if return_tensors is not None:
UpperCAmelCase_ = padded_inputs.convert_to_tensors(UpperCAmelCase__ )
return padded_inputs
| 43
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ : Union[str, Any] = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = ["MobileViTFeatureExtractor"]
lowercase__ : List[Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[int] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
| 1
|
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
lowercase__ : List[str] = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
lowercase__ : Optional[int] = {
"jukebox": 512,
}
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_LYRIC_TOKENS_SIZES
lowerCAmelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=["v3", "v2", "v2"] , UpperCAmelCase__ : Tuple=512 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Any="<|endoftext|>" , **UpperCAmelCase__ : Union[str, Any] , ) ->List[str]:
UpperCAmelCase_ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token
super().__init__(
unk_token=UpperCAmelCase__ , n_genres=UpperCAmelCase__ , version=UpperCAmelCase__ , max_n_lyric_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
UpperCAmelCase_ = version
UpperCAmelCase_ = max_n_lyric_tokens
UpperCAmelCase_ = n_genres
with open(UpperCAmelCase__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase_ = json.load(UpperCAmelCase__ )
with open(UpperCAmelCase__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase_ = json.load(UpperCAmelCase__ )
with open(UpperCAmelCase__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase_ = json.load(UpperCAmelCase__ )
UpperCAmelCase_ = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # v2 had a vocabulary of 80 characters; v3 is missing '+', leaving 79, so widen the OOV pattern accordingly.
if len(self.lyrics_encoder ) == 79:
UpperCAmelCase_ = oov.replace(r'''\-\'''' , r'''\-+\'''' )
UpperCAmelCase_ = regex.compile(UpperCAmelCase__ )
UpperCAmelCase_ = {v: k for k, v in self.artists_encoder.items()}
UpperCAmelCase_ = {v: k for k, v in self.genres_encoder.items()}
UpperCAmelCase_ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def lowerCAmelCase__ ( self : str ) ->List[str]:
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ) ->Optional[int]:
UpperCAmelCase_ = [self.artists_encoder.get(UpperCAmelCase__ , 0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase__ ) ):
UpperCAmelCase_ = [self.genres_encoder.get(UpperCAmelCase__ , 0 ) for genre in list_genres[genres]]
UpperCAmelCase_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
UpperCAmelCase_ = [[self.lyrics_encoder.get(UpperCAmelCase__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowerCAmelCase__ ( self : Dict , UpperCAmelCase__ : List[str] ) ->Tuple:
return list(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : str ) ->Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_for_tokenization(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = self._tokenize(UpperCAmelCase__ )
return artist, genre, lyrics
def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False ) ->Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
UpperCAmelCase_ = artists[idx].lower()
UpperCAmelCase_ = [genres[idx].lower()]
else:
UpperCAmelCase_ = self._normalize(artists[idx] ) + '''.v2'''
UpperCAmelCase_ = [
self._normalize(UpperCAmelCase__ ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
UpperCAmelCase_ = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
UpperCAmelCase_ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
UpperCAmelCase_ = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase__ ) )}
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(UpperCAmelCase__ ) + 1
UpperCAmelCase_ = self.vocab
UpperCAmelCase_ = {v: k for k, v in self.vocab.items()}
UpperCAmelCase_ = ''''''
else:
UpperCAmelCase_ = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
UpperCAmelCase_ = self._run_strip_accents(UpperCAmelCase__ )
UpperCAmelCase_ = lyrics.replace('''\\''' , '''\n''' )
UpperCAmelCase_ = self.out_of_vocab.sub('''''' , UpperCAmelCase__ ), [], []
return artists, genres, lyrics
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : Optional[int] ) ->Dict:
UpperCAmelCase_ = unicodedata.normalize('''NFD''' , UpperCAmelCase__ )
UpperCAmelCase_ = []
for char in text:
UpperCAmelCase_ = unicodedata.category(UpperCAmelCase__ )
if cat == "Mn":
continue
output.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : str ) ->str:
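        # Illustrative: _normalize('The Beatles!') -> 'the_beatles' (lowercase, map
        # characters outside [a-z0-9.] to '_', collapse runs of '_', strip the ends).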
UpperCAmelCase_ = (
[chr(UpperCAmelCase__ ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
+ [chr(UpperCAmelCase__ ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
+ [chr(UpperCAmelCase__ ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
+ ['''.''']
)
UpperCAmelCase_ = frozenset(UpperCAmelCase__ )
UpperCAmelCase_ = re.compile(r'''_+''' )
UpperCAmelCase_ = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
UpperCAmelCase_ = pattern.sub('''_''' , UpperCAmelCase__ ).strip('''_''' )
return text
def lowerCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase__ : List[str] ) ->str:
return " ".join(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : bool = False ) ->List[str]:
# Convert to TensorType
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCAmelCase_ = TensorType(UpperCAmelCase__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
UpperCAmelCase_ = tf.constant
UpperCAmelCase_ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
UpperCAmelCase_ = torch.tensor
UpperCAmelCase_ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
UpperCAmelCase_ = jnp.array
UpperCAmelCase_ = _is_jax
else:
UpperCAmelCase_ = np.asarray
UpperCAmelCase_ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
UpperCAmelCase_ = [inputs]
if not is_tensor(UpperCAmelCase__ ):
UpperCAmelCase_ = as_tensor(UpperCAmelCase__ )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int="" , UpperCAmelCase__ : int="pt" ) ->BatchEncoding:
UpperCAmelCase_ = [0, 0, 0]
UpperCAmelCase_ = [artist] * len(self.version )
UpperCAmelCase_ = [genres] * len(self.version )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.tokenize(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._convert_token_to_id(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = [-INFINITY] * len(full_tokens[-1] )
UpperCAmelCase_ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCAmelCase__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCAmelCase__ ) )
UpperCAmelCase_ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCAmelCase__ ) )
UpperCAmelCase_ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCAmelCase__ ) )
return (artists_file, genres_file, lyrics_file)
def lowerCAmelCase__ ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) ->Dict:
UpperCAmelCase_ = self.artists_decoder.get(UpperCAmelCase__ )
UpperCAmelCase_ = [self.genres_decoder.get(UpperCAmelCase__ ) for genre in genres_index]
UpperCAmelCase_ = [self.lyrics_decoder.get(UpperCAmelCase__ ) for character in lyric_index]
return artist, genres, lyrics
| 43
|
'''simple docstring'''
lowercase__ : Union[str, Any] = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n"
lowercase__ : str = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase__ : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 43
| 1
|
'''simple docstring'''
import math
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
return math.sqrt(_UpperCamelCase ) * math.sqrt(_UpperCamelCase ) == num
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = n
while left <= right:
UpperCAmelCase_ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
UpperCAmelCase_ = mid - 1
else:
UpperCAmelCase_ = mid + 1
return False
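# Note (illustrative): the binary-search variant stays exact for very large integers,
# where the float-based sqrt check above can suffer rounding error; e.g. both report
# 16 -> True and 26 -> False.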
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ : Optional[Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowercase__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
| 1
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase__ : str = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class lowerCamelCase ( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ) ->str:
UpperCAmelCase_ = load_tool('''text-question-answering''' )
self.tool.setup()
UpperCAmelCase_ = load_tool('''text-question-answering''' , remote=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
UpperCAmelCase_ = self.tool(UpperCAmelCase__ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
UpperCAmelCase_ = self.remote_tool(UpperCAmelCase__ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' )
def lowerCAmelCase__ ( self : Any ) ->Any:
UpperCAmelCase_ = self.tool(text=UpperCAmelCase__ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
UpperCAmelCase_ = self.remote_tool(text=UpperCAmelCase__ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCAmelCase__ , '''launched the BigScience Research Workshop''' )
| 43
|
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : tuple[int, int] , _UpperCamelCase : bool , ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = grid.shape
UpperCAmelCase_ = [-1, 1, 0, 0]
UpperCAmelCase_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
UpperCAmelCase_ , UpperCAmelCase_ = [(0, source)], set()
UpperCAmelCase_ = np.full((rows, cols) , np.inf )
UpperCAmelCase_ = 0
UpperCAmelCase_ = np.empty((rows, cols) , dtype=_UpperCamelCase )
UpperCAmelCase_ = None
while queue:
((UpperCAmelCase_) , (UpperCAmelCase_)) = heappop(_UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
UpperCAmelCase_ = []
while (x, y) != source:
path.append((x, y) )
UpperCAmelCase_ , UpperCAmelCase_ = predecessors[x, y]
path.append(_UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCamelCase ) ):
UpperCAmelCase_ , UpperCAmelCase_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
UpperCAmelCase_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCamelCase , (dist + 1, (nx, ny)) )
UpperCAmelCase_ = dist + 1
UpperCAmelCase_ = (x, y)
return np.inf, []
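# Usage sketch (illustrative): with grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]]),
# a 4-connected search from (0, 0) to (2, 0) returns distance 6.0 and the path around
# the right edge: (0,0) -> (0,1) -> (0,2) -> (1,2) -> (2,2) -> (2,1) -> (2,0).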
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCamelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''roformer'''
    def __init__( self , vocab_size=5_0000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ) ->None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class lowerCamelCase ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
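# A minimal export sketch using the generic `transformers.onnx.export` helper
# (variable names below are assumptions, not part of this file):
# from pathlib import Path
# from transformers.onnx import export
# export(preprocessor=tokenizer, model=model, config=onnx_config, opset=11, output=Path("model.onnx"))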
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self ) ->None:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ) ->None:
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders( self ) ->None:
        tokenizer = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
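# Note: each merge rule written in setUp ("l o 123") follows the subword-nmt style format
# used by XLM: the symbol pair to merge followed by a score; the fixture values above are
# illustrative test data only.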
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase ( Seq2SeqTrainer ):
    '''simple docstring'''
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ) ->None:
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset: Optional[Dataset] = None , eval_examples=None , ignore_keys: Optional[List[str]] = None , metric_key_prefix: str = "eval" , **gen_kwargs , ) ->Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['''max_length'''] = (
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
        )
        gen_kwargs['''num_beams'''] = (
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs ) ->PredictionOutput:
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , '''predict''' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
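# A typical call pattern for this trainer (a sketch; the argument values are assumptions):
# metrics = trainer.evaluate(max_length=128, num_beams=4)
# The extra keyword arguments are captured in `gen_kwargs` above and forwarded to generation.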
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params( module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device( ):
    '''simple docstring'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_image( image ):
    '''simple docstring'''
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp( ):
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed( seed: int ):
    '''simple docstring'''
    random.seed(seed )
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    '''simple docstring'''
    def __init__( self , parameters: Iterable[torch.nn.Parameter] , decay: float = 0.9999 , min_decay: float = 0.0 , update_after_step: int = 0 , use_ema_warmup: bool = False , inv_gamma: Union[float, int] = 1.0 , power: Union[float, int] = 2 / 3 , model_cls: Optional[Any] = None , model_config: Dict[str, Any] = None , **kwargs , ) ->None:
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get('''max_value''' , None ) is not None:
            deprecation_message = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate('''max_value''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            decay = kwargs['''max_value''']
        if kwargs.get('''min_value''' , None ) is not None:
            deprecation_message = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate('''min_value''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            min_decay = kwargs['''min_value''']
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get('''device''' , None ) is not None:
            deprecation_message = '''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate('''device''' , '''1.0.0''' , deprecation_message , standard_warn=False )
            self.to(device=kwargs['''device'''] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained( cls , path , model_cls ) ->"EMAModel":
        _, ema_kwargs = model_cls.load_config(path , return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters() , model_cls=model_cls , model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self , path ) ->None:
        if self.model_cls is None:
            raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
        if self.model_config is None:
            raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop('''shadow_params''' , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self , optimization_step: int ) ->float:
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
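    # Worked example of the default schedule (use_ema_warmup=False):
    # step=1 -> 2/11 ≈ 0.182, step=99 -> 100/109 ≈ 0.917, step=9999 -> 10000/10009 ≈ 0.999,
    # after which the value is clamped into [min_decay, decay].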
    @torch.no_grad()
    def step( self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self , device=None , dtype=None ) ->None:
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self ) ->dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store( self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        if self.temp_stored_params is None:
            raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self , state_dict: dict ) ->None:
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get('''decay''' , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('''Decay must be between 0 and 1''' )
        self.min_decay = state_dict.get('''min_decay''' , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError('''Invalid min_decay''' )
        self.optimization_step = state_dict.get('''optimization_step''' , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError('''Invalid optimization_step''' )
        self.update_after_step = state_dict.get('''update_after_step''' , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError('''Invalid update_after_step''' )
        self.use_ema_warmup = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError('''Invalid use_ema_warmup''' )
        self.inv_gamma = state_dict.get('''inv_gamma''' , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError('''Invalid inv_gamma''' )
        self.power = state_dict.get('''power''' , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError('''Invalid power''' )
        shadow_params = state_dict.get('''shadow_params''' , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError('''shadow_params must be a list''' )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError('''shadow_params must all be Tensors''' )
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) ->None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ) ->None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_2( self ) ->None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def test_stable_diffusion_karras_sigmas( self ) ->None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self ) ->None:
        super().setUp()
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer( self ):
        return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
    @cached_property
    def default_tokenizer_fast( self ):
        return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
    def test_prepare_batch( self ) ->None:
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
# Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text( self ) ->None:
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
            # check if input_ids are returned and no labels
            self.assertIn('''input_ids''' , batch )
            self.assertIn('''attention_mask''' , batch )
            self.assertNotIn('''labels''' , batch )
            self.assertNotIn('''decoder_attention_mask''' , batch )
@require_torch
    def test_tokenizer_as_target_length( self ) ->None:
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
            self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
    def test_prepare_batch_not_longer_than_maxlen( self ) ->None:
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=True , truncation=True , return_tensors='''pt''' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
    def test_special_tokens( self ) ->None:
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , text_target=tgt_text , return_tensors='''pt''' )
            input_ids = inputs['''input_ids''']
            labels = inputs['''labels''']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    def test_pretokenized_inputs( self ) ->None:
        pass
    def test_embeded_special_tokens( self ) ->None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected( file , sock ):
    '''simple docstring'''
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
def join( separator: str , separated: list[str] ) ->str:
    '''simple docstring'''
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
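# Example (a sketch): join("-", ["a", "b", "c"]) returns "a-b-c"; the trailing separator
# appended by the loop is removed by the final strip(separator).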
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    '''simple docstring'''
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
UpperCAmelCase_ = 384
UpperCAmelCase_ = 1536
UpperCAmelCase_ = 6
elif "l16" in checkpoint_url:
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 4096
UpperCAmelCase_ = 24
UpperCAmelCase_ = 16
UpperCAmelCase_ = 0.1
elif "b4" in checkpoint_url:
UpperCAmelCase_ = 4
elif "l7" in checkpoint_url:
UpperCAmelCase_ = 7
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 4096
UpperCAmelCase_ = 24
UpperCAmelCase_ = 16
UpperCAmelCase_ = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''target_encoder''']
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
UpperCAmelCase_ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
UpperCAmelCase_ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
UpperCAmelCase_ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
UpperCAmelCase_ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
UpperCAmelCase_ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _UpperCamelCase , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase__ : str = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase__ : Optional[int] = logging.get_logger(__name__)
def get_resize_output_image_size( input_image: np.ndarray , output_size: Union[int, Iterable[int]] , keep_aspect_ratio: bool , multiple: int ):
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
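# Worked example: a 480x640 input targeted at 384x384 with keep_aspect_ratio=True and
# multiple=32 gives scale_height=0.8 and scale_width=0.6; 0.8 is closer to 1, so both
# sides use 0.8 -> (384, 512), each already a multiple of 32.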
class lowerCamelCase ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) ->None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , keep_aspect_ratio: bool = False , ensure_multiple_of: int = 1 , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) ->np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , keep_aspect_ratio: bool = None , ensure_multiple_of: int = None , resample: PILImageResampling = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) ->PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
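    # Usage sketch (variable names are assumptions): given `outputs = model(**inputs)`,
    # `post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])` returns one
    # HxW tensor of class indices per image.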
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def dataset():
    '''simple docstring'''
    n = 10
    features = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
    dataset = datasets.Dataset.from_dict(
        {
            '''tokens''': [['''foo'''] * 5] * n,
            '''labels''': [[1] * 5] * n,
            '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * n,
            '''id''': list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope='''session''' )
def arrow_file(tmp_path_factory , dataset ):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
lowercase__ : Tuple = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def text_file(tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
    data = FILE_CONTENT
    with open(filename , '''w''' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='''session''' )
def bz2_file(tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with bz2.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def gz_file(tmp_path_factory ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with gzip.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def lz4_file(tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
        data = bytes(FILE_CONTENT , '''utf-8''' )
        with lz4.frame.open(path , '''wb''' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='''session''' )
def seven_zip_file(tmp_path_factory , text_file ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with py7zr.SevenZipFile(path , '''w''' ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
        return path
@pytest.fixture(scope='''session''' )
def tar_file(tmp_path_factory , text_file ):
    '''simple docstring'''
    import tarfile
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
    with tarfile.TarFile(path , '''w''' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='''session''' )
def xz_file(tmp_path_factory ):
    '''simple docstring'''
    import lzma
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with lzma.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture(scope='''session''' )
def zip_file(tmp_path_factory , text_file ):
    '''simple docstring'''
    import zipfile
    path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
    with zipfile.ZipFile(path , '''w''' ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
    return path
@pytest.fixture(scope='''session''' )
def zstd_file(tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        path = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
        data = bytes(FILE_CONTENT , '''utf-8''' )
        with zstd.open(path , '''wb''' ) as f:
            f.write(data )
        return path
@pytest.fixture(scope='''session''' )
def xml_file(tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
    data = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    import sqlite3

    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
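# Illustrative only (not from the original module): a sketch of how a test consumes
# the `csv_path` fixture above; pytest injects the fixture by parameter name, and the
# expected values follow directly from the DATA rows defined earlier.
#
#     def test_csv_path_contents(csv_path):
#         import csv
#
#         with open(csv_path, newline="") as f:
#             rows = list(csv.DictReader(f))
#         assert [row["col_1"] for row in rows] == ["0", "1", "2", "3"]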
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
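# Quick round-trip sketch (illustrative, not part of the original module): the parquet
# file written above can be read back with pyarrow to confirm the schema survives.
#
#     import pyarrow.parquet as pq
#     table = pq.read_table(parquet_path)
#     assert table.column_names == ["col_1", "col_2", "col_3"]
#     assert table.num_rows == len(DATA)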
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
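# Usage sketch (an assumption on my part, not from the original module): the hidden
# file and directory created above give data-files resolution tests something to
# assert against, e.g.:
#
#     def test_subdir_contents(data_dir_with_hidden_files):
#         names = {p.name for p in (data_dir_with_hidden_files / "subdir").iterdir()}
#         assert names == {"train.txt", "test.txt", ".test.txt"}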
| 43
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
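# Usage sketch (values below are illustrative, not defaults from the original file):
#
#     config = TimeSeriesTransformerConfig(
#         prediction_length=24,
#         context_length=48,
#         num_time_features=2,
#         num_static_categorical_features=1,
#         cardinality=[366],
#     )
#     assert config.d_model == 64  # default Transformer width set above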
| 43
| 1
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
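# Worked example (checked by hand): 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29.
# Trial division only runs up to sqrt(n); any factor left above 1 is itself prime,
# which is why the final `if n > 1` assignment is needed.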
| 43
|
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 43
| 1
|
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
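# Usage sketch (outputs verified by hand against the functions above):
#     is_arithmetic_series([2, 4, 6])  -> True   (common difference 2)
#     is_arithmetic_series([2, 4, 7])  -> False
#     arithmetic_mean([2, 4, 6])       -> 4.0    ((2 + 4 + 6) / 3)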
| 43
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 43
| 1
|
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal, otherwise 0 (logical XNOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 43
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the image processor is expected to produce."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
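# Standalone usage sketch (illustrative; mirrors what the tests above exercise):
#
#     from PIL import Image
#     processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
#     pixel_values = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values
#     # resized height/width come out as multiples of size_divisor
#     assert pixel_values.shape[-1] % 32 == 0 and pixel_values.shape[-2] % 32 == 0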
| 43
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    """Download the standard COCO test image used to sanity-check the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
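# Example invocation (the script name and checkpoint path are placeholders):
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./swiftformer_xs.pth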
| 43
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 43
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowercase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 43
|
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by bisection (requires a sign change)."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
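# Worked example: f(2) = -1 and f(3) = 16, so f changes sign inside [1, 1000], and
# bisection(f, 1, 1000) converges to the real root of x**3 - 2*x - 5 near 2.0945515.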
| 43
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
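# Usage sketch (illustrative; the defaults shown are the ones defined above):
#
#     config = GPTBigCodeConfig(vocab_size=50257, n_layer=12, multi_query=True)
#     assert config.hidden_size == config.n_embd == 768  # resolved via attribute_map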
| 43
|
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
| 43
| 1
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
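# Minimal usage sketch (assumes a Flax/transformers install and hub access;
# the auto classes dispatch on the checkpoint config's model_type):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = FlaxAutoModel.from_pretrained("gpt2")
#   outputs = model(**tokenizer("hello world", return_tensors="np"))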
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = 99
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
UpperCAmelCase_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase_ = input_ids.shape[0]
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase__ ( self : Any ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data()
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ )
UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str ) ->int:
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
UpperCAmelCase_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
UpperCAmelCase_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
UpperCAmelCase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
UpperCAmelCase_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
UpperCAmelCase_ = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self : str ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self : int ) ->int:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ = model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ )
UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase_ = ['''Sam''']
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ )
assert generated_txt[0].strip() == tgt_text
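# Sketch of the generation flow the slow test above exercises (assumes the
# public facebook/blenderbot-400M-distill checkpoint and a Flax install):
#   model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#   inputs = tokenizer(["Sam"], return_tensors="jax")
#   sequences = model.generate(**inputs, num_beams=1, max_length=25).sequences
#   print(tokenizer.batch_decode(sequences, skip_special_tokens=True))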
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    '''simple docstring'''
    # Round the real cube root before cubing; a raw float root (e.g.
    # 27 ** (1 / 3) == 3.0000000000000004) would make the comparison fail.
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
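    # Extra illustrative checks (values chosen to exercise the rounding step):
    print(perfect_cube(125))  # True: 5 ** 3
    print(perfect_cube(126))  # False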
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    '''simple docstring'''
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    '''simple docstring'''
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    '''simple docstring'''
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    '''simple docstring'''
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
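# Quick illustration of the inspected API (network access required; "squad" and
# "plain_text" are the same public dataset/config used in the tests above):
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']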
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
def lowerCAmelCase__ ( self : int ) ->Dict:
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = self.scheduler_classes[0](**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
UpperCAmelCase_ = scheduler.timesteps[0]
UpperCAmelCase_ = scheduler.timesteps[1]
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
UpperCAmelCase_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCAmelCase__ )
UpperCAmelCase_ = 1
scheduler.set_timesteps(UpperCAmelCase__ )
UpperCAmelCase_ = scheduler.timesteps
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCAmelCase__ ):
# 1. scale model input
UpperCAmelCase_ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
UpperCAmelCase_ = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
UpperCAmelCase_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(UpperCAmelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def lowerCAmelCase__ ( self : int ) ->str:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCAmelCase__ )
UpperCAmelCase_ = [106, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
UpperCAmelCase_ = scheduler.timesteps
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCAmelCase_ = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# 2. predict noise residual
UpperCAmelCase_ = model(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. predict previous sample x_t-1
UpperCAmelCase_ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ ).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(UpperCAmelCase__ ) )
UpperCAmelCase_ = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def lowerCAmelCase__ ( self : Dict ) ->Any:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCAmelCase__ )
UpperCAmelCase_ = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCAmelCase__ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCAmelCase__ )
UpperCAmelCase_ = [39, 30, 12, 1, 0]
UpperCAmelCase_ = len(UpperCAmelCase__ )
with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**UpperCAmelCase__ )
UpperCAmelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
                UpperCAmelCase__ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__ )
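    # Sketch of the custom-timestep path the tests above exercise (assumes the
    # diffusers CMStochasticIterativeScheduler API imported at the top):
    #   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201)
    #   scheduler.set_timesteps(timesteps=[106, 0])  # must be descending and in range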
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    '''simple docstring'''
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    '''simple docstring'''
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    '''simple docstring'''
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
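# Example of what analyze_results flags (hypothetical inputs): with
# import_dict_objects = {"none": ["A", "A"]} and type_hint_objects = {"none": ["A"]}
# it reports a duplicate _import_structure definition for "A", and any object
# present on only one side is listed under "Differences for base imports:".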
def check_all_inits():
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    '''simple docstring'''
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
'''simple docstring'''
def catalan_number(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
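    # The helper returns the (n - 1)-th Catalan number for input n (the name
    # catalan_number is inferred from the recurrence, not given by the source):
    print(catalan_number(1))  # 1
    print(catalan_number(5))  # 14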
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    '''simple docstring'''
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
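    # Illustrative run: an open knight's tour exists on a 5x5 board, so this
    # prints rows of move numbers 1..25 (boards of size 2-4 have no tour and
    # raise ValueError).
    for row in open_knight_tour(5):
        print(row)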
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    '''simple docstring'''
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
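# Worked example (hand-checked against the functions above):
#   bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
#   reverse_bwt("nnbaaa", 3) == "banana"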
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline; `images` holds the rendered views."""

    images: Union[List[PIL.Image.Image], np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating 3D assets from an image, via Shap-E's image-conditioned prior."""

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ) -> None:
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        # Sample fresh noise unless the caller supplied latents of the expected shape.
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id: int = 0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        # Return the device the modules actually execute on, accounting for
        # accelerate hooks that may have offloaded them.
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
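
# A minimal numerical sketch of the classifier-free guidance step above (illustrative
# values only, not produced by the pipeline): with guidance_scale = 3.0, an
# unconditional prediction of 0.2 and a conditional prediction of 0.5 combine to
#     0.2 + 3.0 * (0.5 - 0.2) = 1.1,
# i.e. the conditional signal is extrapolated away from the unconditional one.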
'''Lazy import structure for the MobileViT models in transformers.'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
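
# A minimal usage sketch of the lazy-import machinery above (hypothetical session,
# assuming transformers is installed with the torch and vision extras): attribute
# access on the package triggers the real submodule import on first use.
#
#     >>> from transformers import MobileViTConfig, MobileViTModel
#     >>> model = MobileViTModel(MobileViTConfig())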