"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
A = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
A = 250_004
A = 250_020
@require_sentencepiece
@require_tokenizers
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = MBartTokenizer
lowercase_ = MBartTokenizerFast
lowercase_ = True
lowercase_ = True
def a_ ( self : str):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_)
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test")
self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_)
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_)
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def a_ ( self : Dict):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_)
__UpperCAmelCase : int = tempfile.mkdtemp()
__UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
__UpperCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_)
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
__UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_)
__UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_)
# Checks everything loads correctly in the same way
__UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
shutil.rmtree(UpperCamelCase_)
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase : Tuple = tempfile.mkdtemp()
__UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
__UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_))
shutil.rmtree(UpperCamelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
lowercase_ = "facebook/mbart-large-en-ro"
lowercase_ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
lowercase_ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def a_ ( cls : int):
"""simple docstring"""
__UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO")
__UpperCAmelCase : Union[str, Any] = 1
return cls
def a_ ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_)
def a_ ( self : Optional[int]):
"""simple docstring"""
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids)
__UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
__UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_)
__UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_)
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , UpperCamelCase_)
__UpperCAmelCase : Tuple = 10
__UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , UpperCamelCase_)
self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001])
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = tempfile.mkdtemp()
__UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_)
__UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_)
@require_torch
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt")
__UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
__UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
__UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt")
__UpperCAmelCase : Any = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt")
__UpperCAmelCase : int = targets["input_ids"]
__UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : int = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR")
self.assertEqual(
nested_simplify(UpperCamelCase_) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
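
# The fairseq-parity assertions above lean on `shift_tokens_right`, which for
# MBart rotates the language-code token (the last non-pad token of the label
# sequence) to the front to build decoder inputs. A minimal pure-Python sketch
# of that contract; the helper name below is ours, not the library's:
def shift_tokens_right_sketch(labels: list, pad_token_id: int) -> list:
    # the last non-pad token is the language code in MBart label sequences
    last_non_pad = max(i for i, tok in enumerate(labels) if tok != pad_token_id)
    # shift everything right by one and place the language code up front
    return [labels[last_non_pad]] + labels[:-1]


# e.g. shift_tokens_right_sketch([9019, 96, 2, RO_CODE], pad_token_id=1)
# returns [RO_CODE, 9019, 96, 2]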
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCAmelCase =50_003
UpperCAmelCase =50_002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = PLBartTokenizer
_lowerCamelCase = None
_lowerCamelCase = False
def UpperCamelCase__ ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""base""" ,keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ) -> int:
A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""base""" ,keep_accents=lowerCamelCase_ )
A = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
A = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
A = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
A = tokenizer.vocab_size
A = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 4 ,lowerCamelCase_ )]
self.assertListEqual(lowerCamelCase_ ,["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ ) ,lowerCamelCase_ ,)
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""multi""" ,keep_accents=lowerCamelCase_ )
A = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
A = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
A = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
A = tokenizer.vocab_size
A = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 7 ,lowerCamelCase_ )]
self.assertListEqual(
lowerCamelCase_ ,["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ ) ,lowerCamelCase_ ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = '''uclanlp/plbart-python-en_XX'''
_lowerCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
_lowerCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
_lowerCamelCase = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
A = PLBartTokenizer.from_pretrained(
cls.checkpoint_name ,language_codes="""base""" ,src_lang="""python""" ,tgt_lang="""en_XX""" )
A = 1
return cls
def UpperCamelCase__ ( self ) -> Optional[int]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,5_0_0_0_3 )
def UpperCamelCase__ ( self ) -> Optional[int]:
A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> str:
self.assertIn(lowerCamelCase_ ,self.tokenizer.all_special_ids )
A = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
A = self.tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> List[str]:
A = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0]
self.assertIsInstance(src_text[0] ,lowerCamelCase_ )
A = 1_0
A = self.tokenizer(lowerCamelCase_ ,max_length=lowerCamelCase_ ,truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[5_0_0_0_4, 5_0_0_0_1] )
def UpperCamelCase__ ( self ) -> Optional[int]:
A = tempfile.mkdtemp()
A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
A = PLBartTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase_ )
@require_torch
def UpperCamelCase__ ( self ) -> Optional[int]:
A = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
A = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,lowerCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def UpperCamelCase__ ( self ) -> str:
A = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
A = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertEqual((2, 2_6) ,batch.input_ids.shape )
self.assertEqual((2, 2_6) ,batch.attention_mask.shape )
A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCamelCase__ ( self ) -> Tuple:
A = self.tokenizer(self.src_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=3 ,return_tensors="""pt""" )
A = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=1_0 ,return_tensors="""pt""" )
A = targets["""input_ids"""]
A = shift_tokens_right(lowerCamelCase_ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,1_0 )
@require_torch
def UpperCamelCase__ ( self ) -> List[Any]:
A = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) ,{
# A, test, EOS, en_XX
"""input_ids""": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_0_0_0_1,
} ,)
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple ="""pixel_values"""
__UpperCAmelCase : List[str] =False
__UpperCAmelCase : Tuple =TimmBackboneConfig
def __init__( self , __a , **__a ):
requires_backends(self , "timm" )
super().__init__(__lowerCAmelCase )
__lowerCAmelCase = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f"backbone {config.backbone} is not supported by timm." )
if hasattr(__lowerCAmelCase , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
__lowerCAmelCase = getattr(__lowerCAmelCase , "use_pretrained_backbone" , __lowerCAmelCase )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
__lowerCAmelCase = config.out_indices if getattr(__lowerCAmelCase , "out_indices" , __lowerCAmelCase ) is not None else (-1,)
__lowerCAmelCase = timm.create_model(
config.backbone , pretrained=__lowerCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__lowerCAmelCase , **__lowerCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowerCAmelCase = self._backbone.return_layers
__lowerCAmelCase = {layer["module"]: str(__lowerCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__lowerCAmelCase )
@classmethod
def snake_case ( cls , __a , *__a , **__a ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
__lowerCAmelCase = kwargs.pop("config" , TimmBackboneConfig() )
__lowerCAmelCase = kwargs.pop("use_timm_backbone" , __lowerCAmelCase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
__lowerCAmelCase = kwargs.pop("num_channels" , config.num_channels )
__lowerCAmelCase = kwargs.pop("features_only" , config.features_only )
__lowerCAmelCase = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
__lowerCAmelCase = kwargs.pop("out_indices" , config.out_indices )
__lowerCAmelCase = TimmBackboneConfig(
backbone=__lowerCAmelCase , num_channels=__lowerCAmelCase , features_only=__lowerCAmelCase , use_pretrained_backbone=__lowerCAmelCase , out_indices=__lowerCAmelCase , )
return super()._from_config(__lowerCAmelCase , **__lowerCAmelCase )
def snake_case ( self , __a ):
pass
def snake_case ( self , __a , __a=None , __a=None , __a=None , **__a ):
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowerCAmelCase = self._all_layers
__lowerCAmelCase = self._backbone(__lowerCAmelCase , **__lowerCAmelCase )
__lowerCAmelCase = self._return_layers
__lowerCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowerCAmelCase = self._backbone(__lowerCAmelCase , **__lowerCAmelCase )
__lowerCAmelCase = None
__lowerCAmelCase = tuple(__lowerCAmelCase )
__lowerCAmelCase = tuple(__lowerCAmelCase ) if hidden_states is not None else None
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
__lowerCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__lowerCAmelCase , hidden_states=__lowerCAmelCase , attentions=__lowerCAmelCase )
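
# A short usage sketch for the wrapper above. It assumes `timm` and `torch`
# are installed; "resnet18" is just an example timm model name:
#
#     import torch
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#     model = TimmBackbone(config)
#     feature_maps = model(torch.randn(1, 3, 224, 224)).feature_maps  # one tensor per requested stage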
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
A : Optional[List[str]] = None
A : Optional[int] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
A : int = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : bool =True
__UpperCAmelCase : Optional[str] =None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] ="PIL.Image.Image"
__UpperCAmelCase : ClassVar[Any] =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
__UpperCAmelCase : str =field(default="""Image""" ,init=lowerCAmelCase__ ,repr=lowerCAmelCase__ )
def __call__( self ):
return self.pa_type
def snake_case ( self , __a ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(__a , __a ):
__lowerCAmelCase = np.array(__a )
if isinstance(__a , __a ):
return {"path": value, "bytes": None}
elif isinstance(__a , __a ):
return {"path": None, "bytes": value}
elif isinstance(__a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__a )
elif isinstance(__a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__a )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def snake_case ( self , __a , __a=None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
__lowerCAmelCase = {}
__lowerCAmelCase , __lowerCAmelCase = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(__a ):
__lowerCAmelCase = PIL.Image.open(__a )
else:
__lowerCAmelCase = path.split("::" )[-1]
try:
__lowerCAmelCase = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
__lowerCAmelCase = token_per_repo_id.get(__a )
except ValueError:
__lowerCAmelCase = None
with xopen(__a , "rb" , use_auth_token=__a ) as f:
__lowerCAmelCase = BytesIO(f.read() )
__lowerCAmelCase = PIL.Image.open(bytes_ )
else:
__lowerCAmelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case ( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def snake_case ( self , __a ):
if pa.types.is_string(storage.type ):
__lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.binary() )
__lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.string() )
__lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
__lowerCAmelCase = storage.field("bytes" )
else:
__lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
__lowerCAmelCase = storage.field("path" )
else:
__lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.string() )
__lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__lowerCAmelCase = pa.array(
[encode_np_array(np.array(__a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.string() )
__lowerCAmelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
def snake_case ( self , __a ):
@no_op_if_value_is_null
def path_to_bytes(__a ):
with xopen(__a , "rb" ) as f:
__lowerCAmelCase = f.read()
return bytes_
__lowerCAmelCase = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowerCAmelCase = pa.array(
[os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
__lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(__a , self.pa_type )
def _lowerCamelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__lowerCAmelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = BytesIO()
if image.format in list_image_compression_formats():
__lowerCAmelCase = image.format
else:
__lowerCAmelCase = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
__lowerCAmelCase = array.dtype
__lowerCAmelCase = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
__lowerCAmelCase = dtype.kind
__lowerCAmelCase = dtype.itemsize
__lowerCAmelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__lowerCAmelCase = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__lowerCAmelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__lowerCAmelCase = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
__lowerCAmelCase = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
__lowerCAmelCase = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
__lowerCAmelCase , __lowerCAmelCase = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
__lowerCAmelCase = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
__lowerCAmelCase = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
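
# A short usage sketch for the `Image` feature above. `Dataset.cast_column`
# routes through `cast_storage`, and numpy input takes the list branch that
# calls `encode_np_array`; the toy array below is ours:
#
#     import numpy as np
#     from datasets import Dataset, Image
#
#     arr = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
#     ds = Dataset.from_dict({"image": [arr]}).cast_column("image", Image())
#     ds[0]["image"]  # a PIL.Image.Image instance, since decode=True by default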
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = RobertaPreLayerNormConfig.from_pretrained(
lowerCAmelCase__ ,architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
lowerCamelCase_ = torch.load(hf_hub_download(repo_id=lowerCAmelCase__ ,filename='''pytorch_model.bin''' ) )
lowerCamelCase_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
lowerCamelCase_ = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
lowerCamelCase_ = tensor_value
lowerCamelCase_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCAmelCase__ ,config=lowerCAmelCase__ ,state_dict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
# convert tokenizer
lowerCamelCase_ = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A_ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
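
# Example invocation, assuming this script is saved as convert_script.py (the
# repo id below is the one cited in the --checkpoint-repo help text above):
#
#     python convert_script.py \
#         --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#         --pytorch_dump_folder_path ./roberta_prelayernorm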
"""Tests for the MaskFormer Swin backbone."""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        layer_norm_eps=1e-5,
        initializer_range=0.02,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
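The timings above depend on a `get_duration` decorator imported from a local `utils` module that is not shown here. A minimal sketch of what such a decorator might look like (an assumption on my part, not the benchmark's actual implementation):

# Hypothetical sketch of a get_duration decorator: it runs the wrapped
# function and returns the elapsed wall-clock time in seconds, which is
# what benchmark_iterating() stores in its `times` dict.
import functools
import timeit


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start

    return wrapper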
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
A_ , A_ : Union[str, Any] = array[indexa], array[indexa]
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if length > 1:
A_ : Any = int(length / 2 )
for i in range(_UpperCAmelCase , low + middle ):
comp_and_swap(_UpperCAmelCase , _UpperCAmelCase , i + middle , _UpperCAmelCase )
bitonic_merge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
bitonic_merge(_UpperCAmelCase , low + middle , _UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if length > 1:
A_ : str = int(length / 2 )
bitonic_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 )
bitonic_sort(_UpperCAmelCase , low + middle , _UpperCAmelCase , 0 )
bitonic_merge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase_ : Dict = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase_ : List[str] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
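Note that bitonic sort only works when the number of elements is a power of two; the recursion halves the array exactly. A quick sanity check (my own addition, not part of the original script):

# Illustrative check: bitonic sort assumes len(array) is a power of two.
data = [12, 42, -21, 17, 23, 18, 9, -5]
assert len(data) & (len(data) - 1) == 0, "length must be a power of two"
bitonic_sort(data, 0, len(data), 1)
print(data)  # [-21, -5, 9, 12, 17, 18, 23, 42]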
'''Count the number of islands (8-connected groups of 1s) in a binary matrix.'''


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        # Checking all 8 neighbouring cells of cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
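A short usage example (my own illustration): diagonal neighbours belong to the same island, so this matrix contains five islands.

matrix = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(len(matrix), len(matrix[0]), matrix)
print(g.count_islands())  # 5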
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return n choose k: the number of ways to pick k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1_024,
"""facebook/bart-large""": 1_024,
"""facebook/bart-large-mnli""": 1_024,
"""facebook/bart-large-cnn""": 1_024,
"""facebook/bart-large-xsum""": 1_024,
"""yjernite/bart_eli5""": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings,
    avoiding whitespace/control characters that byte-level BPE cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (most frequent) bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
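For orientation, loading this slow tokenizer from a published checkpoint and round-tripping a sentence looks roughly like the following (an illustrative sketch, not part of the original file):

# Illustrative usage of the slow byte-level BPE BART tokenizer defined above.
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.decode(ids))  # "<s>Hello world</s>"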
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map an audiocraft parameter name to the transformers naming scheme."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
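For orientation, a typical invocation of this conversion script would look something like the sketch below; the script filename and output folder are my own assumptions, only the flags come from the argument parser above.

# Illustrative invocation (shown as comments; run the first line from a shell):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu
# or, programmatically:
#   convert_musicgen_checkpoint("small", pytorch_dump_folder="./musicgen-small", device="cpu")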
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
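A short sanity check of how the `attribute_map` aliases resolve (my own illustration, not part of the configuration file):

# attribute_map lets hidden_size / num_attention_heads act as aliases for
# d_model / encoder_attention_heads on the config object.
config = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == 256
assert config.num_attention_heads == 8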
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
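The reparameterised sampling implemented by `DiagonalGaussianDistribution` can be exercised on a dummy tensor; the shapes below are my own illustration, not part of the module:

# Illustrative sketch: the first half of the channel dimension is the mean,
# the second half the log-variance; kl() reduces over dims [1, 2, 3].
import torch

params = torch.randn(1, 8, 4, 4)  # 4 mean channels + 4 logvar channels
dist = DiagonalGaussianDistribution(params)
latent = dist.sample()
print(latent.shape, dist.kl().shape)  # torch.Size([1, 4, 4, 4]) torch.Size([1])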
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers=0,
        crop_overlap_ratio=512 / 1500,
        points_per_crop=32,
        crop_n_points_downscale_factor=1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
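Using this pipeline end to end looks roughly like the sketch below; the checkpoint id and image URL are my own assumptions for illustration:

# Illustrative usage of the mask-generation pipeline (checkpoint id assumed).
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(len(outputs["masks"]), outputs["scores"][:3])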
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards in base 10.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
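For comparison, an equivalent string-based check (my own addition, not in the original file):

# Equivalent one-liner using string reversal; same semantics for negatives.
def is_palindrome_str(num: int) -> bool:
    return num >= 0 and str(num) == str(num)[::-1]


assert is_palindrome_str(121) and not is_palindrome_str(-121) and not is_palindrome_str(123)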
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float = 1e-7, power: float = 1.0, last_epoch: int = -1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
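A typical training loop wires `get_scheduler` up as follows; the model and hyperparameters in this sketch are my own illustration:

# Illustrative usage: cosine schedule with 100 warmup steps over 1000 steps.
import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
for _ in range(1000):
    optimizer.step()   # ... after computing gradients
    scheduler.step()   # advance the learning-rate schedule once per step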
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
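
# Hedged illustration (added): rename_key moves a value to a new key in place,
# which is how each (src, dest) pair from create_rename_keys is applied to the
# checkpoint's state dict. The toy value stands in for a real tensor.
def _demo_rename_key():
    toy_state_dict = {"cls_token": 1}
    rename_key(toy_state_dict, "cls_token", "vit.embeddings.cls_token")
    assert toy_state_dict == {"vit.embeddings.cls_token": 1}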
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
_snake_case : List[Any] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
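    # Hedged usage sketch (added; the folder name is a placeholder): once
    # converted, the checkpoint loads like any other Hugging Face model, e.g.
    #   from transformers import ViTModel
    #   model = ViTModel.from_pretrained("./dino_vitb16")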
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
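
# Hedged sketch (added; not part of the original bot): the "close" rule used
# in the loop above, factored into a standalone predicate so it can be tested
# without a live GitHub connection.
def would_close_for_inactivity(created_at, updated_at, now):
    return (now - updated_at).days > 7 and (now - created_at).days >= 30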
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCAmelCase__ : str = 'base_with_context'
def load_notes_encoder(weights, model):
    # NOTE: the left-hand-side attribute paths below follow the T5-style module
    # layout of the diffusers spectrogram encoders; they are a best-effort
    # reconstruction, since the original assignment targets were garbled.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # NOTE: as above, the assignment targets are a best-effort reconstruction
    # of the garbled originals, following the same T5-style layout.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # NOTE: the assignment targets follow the diffusers T5FilmDecoder layout
    # and are a best-effort reconstruction of the garbled originals.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
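
# Hedged aside (added): Flax/T5X dense kernels are stored as (in_features,
# out_features) while torch.nn.Linear.weight is (out_features, in_features),
# which is why every "kernel" above is transposed with .T before loading.
def _demo_kernel_transpose():
    kernel = onp.arange(6.0).reshape(3, 2)  # Flax-style kernel: (in=3, out=2)
    weight = torch.FloatTensor(kernel.T)    # PyTorch Linear weight: (out=2, in=3)
    assert tuple(weight.shape) == (2, 3)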
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
lowerCAmelCase__ : List[str] = parser.parse_args()
main(args)
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmva_integration_test(self):
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A : List[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
A : List[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case_ )
self.assertListEqual(encoding.boxes , snake_case_ )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
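
# Hedged usage sketch (added; `image` is a placeholder): with apply_ocr=True
# the processor runs Tesseract itself and returns words plus normalized
# (0-1000) boxes alongside pixel_values, as exercised by the tests above:
#   processor = LayoutLMvaImageProcessor(apply_ocr=True)
#   encoding = processor(image, return_tensors="pt")
#   words, boxes = encoding.words, encoding.boxes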
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
UpperCamelCase_ = Accelerator()
UpperCamelCase_ = (accelerator.state.process_index + 2, 10)
UpperCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCamelCase_ = ""
UpperCamelCase_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
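
# Hedged single-process illustration (added; toy shapes): what the padding
# above achieves. Each rank holds a tensor whose dim 0 differs, and
# pad_across_processes right-pads every rank's tensor with zeros up to the
# global maximum so they can be gathered.
def _demo_padding_shape():
    per_rank = [torch.ones(2, 10), torch.ones(3, 10)]
    max_len = max(t.shape[0] for t in per_rank)
    padded = [torch.cat([t, t.new_zeros(max_len - t.shape[0], 10)]) for t in per_rank]
    assert all(t.shape == (3, 10) for t in padded)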
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
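
# Hedged worked example (added; toy 400x600 image, not from the test data):
# with shortest_edge=18 the short side maps to 18 and the long side keeps the
# aspect ratio, so (h=400, w=600) -> (18, 27).
def _demo_shortest_edge_resize():
    shortest_edge, h, w = 18, 400, 600
    if w > h:
        expected = (shortest_edge, int(shortest_edge * w / h))
    else:
        expected = (int(shortest_edge * h / w), shortest_edge)
    assert expected == (18, 27)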
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))

        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))

        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))

        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))

        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))

        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))

        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))

        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))

        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)

        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))

        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
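    # Hand-checkable example (added): the even Fibonacci numbers up to 100 are
    # 2, 8 and 34, so the sum should be 44.
    assert solution(100) == 44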
"""simple docstring"""
import math
from datetime import datetime, timedelta
def lowercase_ ( _lowerCamelCase: int ) -> datetime:
'''simple docstring'''
__lowerCamelCase : str = year % 19
__lowerCamelCase : Dict = year % 4
__lowerCamelCase : List[Any] = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : List[Any] = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Union[str, Any] = leap_day_inhibits / 4
__lowerCamelCase : List[str] = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Union[str, Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : List[str] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Optional[Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_lowerCamelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_lowerCamelCase , 4 , 18 )
else:
return datetime(_lowerCamelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__A = '''will be''' if year > datetime.now().year else '''was'''
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session" )
def lowercase_ ( _lowerCamelCase: List[Any] ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : Tuple = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
__lowerCamelCase : Optional[int] = bytes(_lowerCamelCase , "utf-8" )
with zstd.open(_lowerCamelCase , "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowerCamelCase ) , "w" ) as f:
f.write(_lowerCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: Tuple , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[int] ) -> Dict:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
__lowerCamelCase : Optional[int] = input_paths[compression_format]
__lowerCamelCase : List[str] = tmp_path / "cache"
__lowerCamelCase : Optional[Any] = DownloadConfig(cache_dir=_lowerCamelCase , extract_compressed_file=_lowerCamelCase )
__lowerCamelCase : str = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
with open(_lowerCamelCase ) as f:
__lowerCamelCase : int = f.read()
with open(_lowerCamelCase ) as f:
__lowerCamelCase : int = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: str , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: Any ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase : Any = "custom_cache"
__lowerCamelCase : Optional[int] = "custom_extracted_dir"
__lowerCamelCase : Tuple = tmp_path / "custom_extracted_path"
if default_extracted:
__lowerCamelCase : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
__lowerCamelCase : Tuple = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__lowerCamelCase : Union[str, Any] = xz_file
__lowerCamelCase : Union[str, Any] = (
DownloadConfig(extract_compressed_file=_lowerCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowerCamelCase )
)
__lowerCamelCase : List[Any] = cached_path(_lowerCamelCase , download_config=_lowerCamelCase )
assert Path(_lowerCamelCase ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowercase_ ( ) -> Any:
'''simple docstring'''
with pytest.raises(_lowerCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Optional[int] ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
http_get("https://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase )
def lowercase_ ( _lowerCamelCase: Tuple ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : List[str] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_lowerCamelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_lowerCamelCase )
with pytest.raises(_lowerCamelCase ):
fsspec_head("s3://huggingface.co" )
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
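    # Added note: x**3 - 2x - 5 = 0 has its real root near 2.0945515, so the
    # printed value should be approximately that.
    assert abs(intersection(f, 3, 3.5) - 2.0945515) < 1e-3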
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}
    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
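
# Hedged usage sketch (added; the checkpoint name is an example choice):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   outputs["depth"].save("depth.png")  # the PIL image built in postprocess above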
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
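
# Design note (added): the _LazyModule indirection above defers the torch-backed
# imports until an attribute is first touched, so
#   from transformers.models.megatron_bert import MegatronBertConfig
# stays cheap even when the heavy modeling file is never needed.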
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
def __snake_case ( self : int , lowerCamelCase : Optional[Any] , lowerCamelCase : int=False ) -> List[str]:
if not batched:
__snake_case : List[Any] = self.size["shortest_edge"]
__snake_case : int = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
__snake_case , __snake_case : List[Any] = image.size
else:
__snake_case , __snake_case : int = image.shape[1], image.shape[2]
__snake_case : Any = size / min(lowerCamelCase , lowerCamelCase )
if h < w:
__snake_case , __snake_case : List[str] = size, scale * w
else:
__snake_case , __snake_case : Union[str, Any] = scale * h, size
__snake_case : int = int((1333 / 800) * size )
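# detection-style size cap: with a shortest edge of 800 the longest edge may not exceed 1333; scaled here to the configured size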
if max(newh , neww ) > max_size:
__snake_case : Union[str, Any] = max_size / max(newh , neww )
__snake_case : int = newh * scale
__snake_case : Tuple = neww * scale
__snake_case , __snake_case : int = int(newh + 0.5 ), int(neww + 0.5 )
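# snap both dimensions down to the nearest multiple of size_divisor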
__snake_case , __snake_case : str = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__snake_case : Optional[Any] = []
for image in image_inputs:
__snake_case , __snake_case : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__snake_case : Any = max(expected_values , key=lambda item : item[0] )[0]
__snake_case : Union[str, Any] = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = BridgeTowerImageProcessor if is_vision_available() else None
def __snake_case ( self : List[Any] ) -> List[str]:
__snake_case : Dict = BridgeTowerImageProcessingTester(self )
@property
def __snake_case ( self : List[str] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Any ) -> Union[str, Any]:
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "size_divisor" ) )
def __snake_case ( self : Tuple ) -> int:
pass
def __snake_case ( self : str ) -> Optional[int]:
# Initialize image processor
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__snake_case : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__snake_case , __snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : Optional[int] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__snake_case , __snake_case : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case ( self : Union[str, Any] ) -> Any:
# Initialize image processor
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__snake_case : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__snake_case , __snake_case : int = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : Optional[int] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__snake_case , __snake_case : List[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __snake_case ( self : Any ) -> Optional[Any]:
# Initialize image processor
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__snake_case : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__snake_case , __snake_case : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
__snake_case , __snake_case : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 203
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__UpperCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __snake_case ( self : Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
__snake_case : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__snake_case : Tuple = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
__snake_case : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
__snake_case : List[str] = CLIPTextModel(lowerCamelCase )
__snake_case : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=lowerCamelCase )
__snake_case : List[str] = CLIPTextModelWithProjection(lowerCamelCase )
__snake_case : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=lowerCamelCase )
__snake_case : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=0 ) -> Union[str, Any]:
__snake_case : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
__snake_case : Any = image / 2 + 0.5
if str(lowerCamelCase ).startswith("mps" ):
__snake_case : Dict = torch.manual_seed(lowerCamelCase )
else:
__snake_case : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def __snake_case ( self : Dict ) -> Any:
__snake_case : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case : Any = self.get_dummy_components()
__snake_case : int = StableDiffusionXLImgaImgPipeline(**lowerCamelCase )
__snake_case : List[str] = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Tuple = self.get_dummy_inputs(lowerCamelCase )
__snake_case : Dict = sd_pipe(**lowerCamelCase ).images
__snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : str = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self : str ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __snake_case ( self : Any ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __snake_case ( self : str ) -> Optional[int]:
pass
def __snake_case ( self : Tuple ) -> Union[str, Any]:
__snake_case : str = self.get_dummy_components()
__snake_case : List[Any] = StableDiffusionXLImgaImgPipeline(**lowerCamelCase )
__snake_case : Optional[Any] = sd_pipe.to(lowerCamelCase )
__snake_case : int = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
# forward without prompt embeds
__snake_case : List[str] = self.get_dummy_inputs(lowerCamelCase )
__snake_case : str = 3 * ["this is a negative prompt"]
__snake_case : Any = negative_prompt
__snake_case : Optional[Any] = 3 * [inputs["prompt"]]
__snake_case : int = sd_pipe(**lowerCamelCase )
__snake_case : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__snake_case : List[Any] = self.get_dummy_inputs(lowerCamelCase )
__snake_case : Optional[Any] = 3 * ["this is a negative prompt"]
__snake_case : int = 3 * [inputs.pop("prompt" )]
__snake_case , __snake_case , __snake_case , __snake_case : Dict = sd_pipe.encode_prompt(lowerCamelCase , negative_prompt=lowerCamelCase )
__snake_case : Tuple = sd_pipe(
**lowerCamelCase , prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , pooled_prompt_embeds=lowerCamelCase , negative_pooled_prompt_embeds=lowerCamelCase , )
__snake_case : List[str] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Optional[int] ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]="cpu" , lowerCamelCase : str=torch.floataa , lowerCamelCase : int=0 ) -> Dict:
__snake_case : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Optional[Any] = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 64, 64) )
__snake_case : Optional[Any] = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
__snake_case : List[str] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : str ) -> Any:
__snake_case : List[str] = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : int = self.get_inputs(lowerCamelCase )
__snake_case : Optional[Any] = pipe(**lowerCamelCase ).images
__snake_case : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__snake_case : Optional[int] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 203
| 1
|
import argparse
import struct
import unittest
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _A : bytes ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = data
# Initialize hash values (first 32 bits of the fractional parts of the square roots of the first 8 primes)
__SCREAMING_SNAKE_CASE : List[str] = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants (first 32 bits of the fractional parts of the cube roots of the first 64 primes)
__SCREAMING_SNAKE_CASE : List[str] = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
__SCREAMING_SNAKE_CASE : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCAmelCase__ ( _A : bytes ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = B'''\x80''' + (B'''\x00''' * (63 - (len(_A ) + 8) % 64))
__SCREAMING_SNAKE_CASE : Optional[int] = struct.pack('''>Q''' , (len(_A ) * 8) )
return data + padding + big_endian_integer
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__SCREAMING_SNAKE_CASE : Dict = list(struct.unpack('''>16L''' , _A ) )
# add 48 0-ed integers
words += [0] * 48
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__SCREAMING_SNAKE_CASE : Optional[int] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
__SCREAMING_SNAKE_CASE : Any = self.ror(_A , 6 ) ^ self.ror(_A , 11 ) ^ self.ror(_A , 25 )
__SCREAMING_SNAKE_CASE : int = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
__SCREAMING_SNAKE_CASE : List[str] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
__SCREAMING_SNAKE_CASE : Dict = self.ror(_A , 2 ) ^ self.ror(_A , 13 ) ^ self.ror(_A , 22 )
__SCREAMING_SNAKE_CASE : str = (a & b) ^ (a & c) ^ (b & c)
__SCREAMING_SNAKE_CASE : Dict = (sa + maj) % 0x1_00_00_00_00
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
__SCREAMING_SNAKE_CASE : Tuple = [a, b, c, d, e, f, g, h]
# Modify final values
__SCREAMING_SNAKE_CASE : List[str] = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
__SCREAMING_SNAKE_CASE : List[Any] = ''''''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def UpperCAmelCase__ ( self : List[str] , _A : int , _A : int ):
"""simple docstring"""
return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
import hashlib
__SCREAMING_SNAKE_CASE : Tuple = bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(_A ).hash , hashlib.shaaaa(_A ).hexdigest() )
def a__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument(
'''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument(
'''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
__SCREAMING_SNAKE_CASE : Tuple = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Union[str, Any] = f.read()
else:
__SCREAMING_SNAKE_CASE : str = bytes(snake_case , '''utf-8''' )
print(SHAaaa(snake_case ).hash )
if __name__ == "__main__":
main()
| 74
|
from math import isclose, sqrt
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x
__SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
__SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
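# sa and ca are sin(2*theta) and cos(2*theta) of the normal's inclination (double-angle identities); reflecting the incoming slope about the normal gives tan(2*theta - phi) below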
__SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
__SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4
__SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
__SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
__SCREAMING_SNAKE_CASE : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
__SCREAMING_SNAKE_CASE : int = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
__SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus
__SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a__ ( snake_case = 1.4 , snake_case = -9.6 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : float = first_x_coord
__SCREAMING_SNAKE_CASE : float = first_y_coord
__SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
| 74
| 1
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_snake_case = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = 42
@dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
__lowerCamelCase = None
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 'train'
__lowerCamelCase = 'dev'
__lowerCamelCase = 'test'
class lowerCAmelCase :
@staticmethod
def UpperCAmelCase ( _lowercase :List[Any] , _lowercase :Union[Split, str] ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase ( _lowercase :str ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def UpperCAmelCase ( _lowercase :List[InputExample] , _lowercase :List[str] , _lowercase :int , _lowercase :PreTrainedTokenizer , _lowercase :Optional[Any]=False , _lowercase :Optional[int]="[CLS]" , _lowercase :Dict=1 , _lowercase :List[Any]="[SEP]" , _lowercase :Optional[Any]=False , _lowercase :Any=False , _lowercase :Dict=0 , _lowercase :List[str]=0 , _lowercase :Union[str, Any]=-1_00 , _lowercase :Any=0 , _lowercase :str=True , ):
'''simple docstring'''
lowercase__ = {label: i for i, label in enumerate(_lowercase )}
lowercase__ = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info("Writing example %d of %d" , _lowercase , len(_lowercase ) )
lowercase__ = []
lowercase__ = []
for word, label in zip(example.words , example.labels ):
lowercase__ = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
lowercase__ = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
lowercase__ = tokens[: (max_seq_length - special_tokens_count)]
lowercase__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
lowercase__ = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
lowercase__ = [cls_token] + tokens
lowercase__ = [pad_token_label_id] + label_ids
lowercase__ = [cls_token_segment_id] + segment_ids
lowercase__ = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
lowercase__ = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
lowercase__ = max_seq_length - len(_lowercase )
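# XLNet-style models pad on the left; all others pad on the right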
if pad_on_left:
lowercase__ = ([pad_token] * padding_length) + input_ids
lowercase__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
lowercase__ = ([pad_token_segment_id] * padding_length) + segment_ids
lowercase__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(_lowercase ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(_lowercase ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(_lowercase ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(_lowercase ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
lowercase__ = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
__lowerCamelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self :List[str] , _lowercase :TokenClassificationTask , _lowercase :str , _lowercase :PreTrainedTokenizer , _lowercase :List[str] , _lowercase :str , _lowercase :Optional[int] = None , _lowercase :Optional[int]=False , _lowercase :Split = Split.train , ):
'''simple docstring'''
lowercase__ = os.path.join(
_lowercase , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + ".lock"
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
lowercase__ = torch.load(_lowercase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
lowercase__ = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
lowercase__ = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , _lowercase )
def __len__( self :Optional[int] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :str , _lowercase :int ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = -100
def __init__( self :int , _lowercase :TokenClassificationTask , _lowercase :str , _lowercase :PreTrainedTokenizer , _lowercase :List[str] , _lowercase :str , _lowercase :Optional[int] = None , _lowercase :List[Any]=False , _lowercase :Split = Split.train , ):
'''simple docstring'''
lowercase__ = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
lowercase__ = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
lowercase__ = tf.data.Dataset.from_generator(
_lowercase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
lowercase__ = tf.data.Dataset.from_generator(
_lowercase , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self :Any , _lowercase :List[str] ):
'''simple docstring'''
return self.features[i]
| 611
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger("""transformers.models.speecht5""")
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
hf_model.apply_weight_norm()
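# weight norm must be applied so the checkpoint's weight_g / weight_v tensors map onto the model's parameters; it is removed again after loading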
lowercase__ = checkpoint["input_conv.weight_g"]
lowercase__ = checkpoint["input_conv.weight_v"]
lowercase__ = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
lowercase__ = checkpoint[f'''upsamples.{i}.1.weight_g''']
lowercase__ = checkpoint[f'''upsamples.{i}.1.weight_v''']
lowercase__ = checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
lowercase__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
lowercase__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
lowercase__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
lowercase__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
lowercase__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
lowercase__ = checkpoint["output_conv.1.weight_g"]
lowercase__ = checkpoint["output_conv.1.weight_v"]
lowercase__ = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , ):
if config_path is not None:
lowercase__ = SpeechTaHifiGanConfig.from_pretrained(__magic_name__ )
else:
lowercase__ = SpeechTaHifiGanConfig()
lowercase__ = SpeechTaHifiGan(__magic_name__ )
lowercase__ = torch.load(__magic_name__ )
load_weights(orig_checkpoint["model"]["generator"] , __magic_name__ , __magic_name__ )
lowercase__ = np.load(__magic_name__ )
lowercase__ = stats[0].reshape(-1 )
lowercase__ = stats[1].reshape(-1 )
lowercase__ = torch.from_numpy(__magic_name__ ).float()
lowercase__ = torch.from_numpy(__magic_name__ ).float()
model.save_pretrained(__magic_name__ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__magic_name__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_snake_case = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 611
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = """Hello world! cécé herlolip"""
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: bool )-> str:
_snake_case : Tuple = FairseqRobertaModel.from_pretrained(lowerCAmelCase )
roberta.eval() # disable dropout
_snake_case : List[str] = roberta.model.encoder.sentence_encoder
_snake_case : Union[str, Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
_snake_case : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , lowerCAmelCase )
_snake_case : str = XLMRobertaXLForSequenceClassification(lowerCAmelCase ) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
_snake_case : List[Any] = roberta_sent_encoder.embed_tokens.weight
_snake_case : Optional[Any] = roberta_sent_encoder.embed_positions.weight
_snake_case : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_snake_case : List[Any] = roberta_sent_encoder.layer_norm.weight
_snake_case : Optional[int] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_snake_case : BertLayer = model.roberta.encoder.layer[i]
_snake_case : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
_snake_case : RobertaAttention = layer.attention
_snake_case : Any = roberta_layer.self_attn_layer_norm.weight
_snake_case : int = roberta_layer.self_attn_layer_norm.bias
# self attention
_snake_case : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_snake_case : Tuple = roberta_layer.self_attn.q_proj.weight
_snake_case : List[str] = roberta_layer.self_attn.q_proj.bias
_snake_case : str = roberta_layer.self_attn.k_proj.weight
_snake_case : int = roberta_layer.self_attn.k_proj.bias
_snake_case : Dict = roberta_layer.self_attn.v_proj.weight
_snake_case : Tuple = roberta_layer.self_attn.v_proj.bias
# self-attention output
_snake_case : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_snake_case : List[Any] = roberta_layer.self_attn.out_proj.weight
_snake_case : int = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_snake_case : Dict = roberta_layer.final_layer_norm.weight
_snake_case : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
_snake_case : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_snake_case : Any = roberta_layer.fca.weight
_snake_case : Tuple = roberta_layer.fca.bias
# output
_snake_case : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_snake_case : List[str] = roberta_layer.fca.weight
_snake_case : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
_snake_case : Optional[Any] = roberta.model.classification_heads['mnli'].dense.weight
_snake_case : str = roberta.model.classification_heads['mnli'].dense.bias
_snake_case : str = roberta.model.classification_heads['mnli'].out_proj.weight
_snake_case : Optional[Any] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
_snake_case : str = roberta.model.encoder.lm_head.dense.weight
_snake_case : str = roberta.model.encoder.lm_head.dense.bias
_snake_case : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
_snake_case : int = roberta.model.encoder.lm_head.layer_norm.bias
_snake_case : Union[str, Any] = roberta.model.encoder.lm_head.weight
_snake_case : Optional[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_snake_case : torch.Tensor = roberta.encode(lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
_snake_case : Union[str, Any] = model(lowerCAmelCase )[0]
if classification_head:
_snake_case : Optional[int] = roberta.model.classification_heads['mnli'](roberta.extract_features(lowerCAmelCase ) )
else:
_snake_case : Optional[int] = roberta.model(lowerCAmelCase )[0]
print(our_output.shape , their_output.shape )
_snake_case : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
_snake_case : List[Any] = torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(lowerCAmelCase ).mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 411
|
from __future__ import annotations
from collections.abc import MutableSequence
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : MutableSequence[float] ):
'''simple docstring'''
if len(UpperCamelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_snake_case : list[float] = list(UpperCamelCase )
_snake_case : Dict = degree
def __add__( self : List[str] , UpperCamelCase : Polynomial ):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_snake_case : int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , UpperCamelCase )
else:
_snake_case : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , UpperCamelCase )
def __sub__( self : Any , UpperCamelCase : Polynomial ):
'''simple docstring'''
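# a - b is implemented as a + (-1) * b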
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Optional[int] ):
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Tuple , UpperCamelCase : Polynomial ):
'''simple docstring'''
_snake_case : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : int | float ):
'''simple docstring'''
_snake_case : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Any ):
'''simple docstring'''
_snake_case : Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(UpperCamelCase )
return polynomial
def __repr__( self : Tuple ):
'''simple docstring'''
return self.__str__()
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : list[float] = [0] * self.degree
for i in range(self.degree ):
_snake_case : List[str] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : int | float = 0 ):
'''simple docstring'''
_snake_case : list[float] = [0] * (self.degree + 2)
_snake_case : Optional[int] = constant
for i in range(self.degree + 1 ):
_snake_case : str = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , UpperCamelCase )
def __eq__( self : str , UpperCamelCase : object ):
'''simple docstring'''
if not isinstance(UpperCamelCase , UpperCamelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[int] , UpperCamelCase : object ):
'''simple docstring'''
return not self.__eq__(UpperCamelCase )
| 411
| 1
|
import math
def _lowerCAmelCase ( __lowerCAmelCase ) -> bool:
"""simple docstring"""
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
snake_case__ : Tuple = range(3 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase=1 , **__lowerCAmelCase ) -> Dict:
"""simple docstring"""
snake_case__ : List[Any] = factor * value
snake_case__ : int = value
while not is_prime(__lowerCAmelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__lowerCAmelCase )
return value
| 701
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[str] = """Speech2TextFeatureExtractor"""
__lowerCAmelCase : List[str] = """Speech2TextTokenizer"""
def __init__( self :List[str] ,__lowercase :Union[str, Any] ,__lowercase :Any ):
super().__init__(__lowercase ,__lowercase )
snake_case__ : Any = self.feature_extractor
snake_case__ : Union[str, Any] = False
def __call__( self :Dict ,*__lowercase :Dict ,**__lowercase :Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowercase ,**__lowercase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
snake_case__ : List[Any] = kwargs.pop('''raw_speech''' )
else:
snake_case__ : Optional[Any] = kwargs.pop('''audio''' ,__lowercase )
snake_case__ : Tuple = kwargs.pop('''sampling_rate''' ,__lowercase )
snake_case__ : Dict = kwargs.pop('''text''' ,__lowercase )
if len(__lowercase ) > 0:
snake_case__ : List[Any] = args[0]
snake_case__ : Dict = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
snake_case__ : Tuple = self.feature_extractor(__lowercase ,*__lowercase ,sampling_rate=__lowercase ,**__lowercase )
if text is not None:
snake_case__ : List[Any] = self.tokenizer(__lowercase ,**__lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
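# both audio and text were provided: the tokenized ids serve as labels for the feature-extractor inputs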
snake_case__ : int = encodings['''input_ids''']
return inputs
def __lowerCamelCase ( self :List[Any] ,*__lowercase :int ,**__lowercase :List[str] ):
return self.tokenizer.batch_decode(*__lowercase ,**__lowercase )
def __lowerCamelCase ( self :List[Any] ,*__lowercase :Optional[Any] ,**__lowercase :str ):
return self.tokenizer.decode(*__lowercase ,**__lowercase )
@contextmanager
def __lowerCamelCase ( self :int ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
snake_case__ : Dict = True
snake_case__ : Dict = self.tokenizer
yield
snake_case__ : int = self.feature_extractor
snake_case__ : List[str] = False
| 219
| 0
|
import mpmath # for roots of unity
import numpy as np
class snake_case__ :
'''simple docstring'''
def __init__( self , a__=None , a__=None ) -> List[Any]:
'''simple docstring'''
__snake_case :str = list(poly_a or [0] )[:]
__snake_case :Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__snake_case :List[str] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
__snake_case :int = len(self.polyB )
# Add 0 to make lengths equal a power of 2
__snake_case :str = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
__snake_case :int = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
__snake_case :Optional[int] = self.__multiply()
def __lowercase ( self , a__ ) -> str:
'''simple docstring'''
__snake_case :Optional[Any] = [[x] for x in self.polyA] if a__ == """A""" else [[x] for x in self.polyB]
# Corner case
if len(dft ) <= 1:
return dft[0]
# Cooley-Tukey style butterfly: repeatedly halve next_ncol, combining entries with powers of the root of unity
__snake_case :Union[str, Any] = self.c_max_length // 2
while next_ncol > 0:
__snake_case :List[Any] = [[] for i in range(next_ncol )]
__snake_case :int = self.root**next_ncol
# First half of next step
__snake_case :List[Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
__snake_case :Optional[int] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(next_ncol ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
__snake_case :Union[str, Any] = new_dft
__snake_case :Dict = next_ncol // 2
return dft[0]
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :Optional[Any] = self.__dft("""A""" )
__snake_case :List[Any] = self.__dft("""B""" )
__snake_case :Any = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
__snake_case :List[str] = 2
while next_ncol <= self.c_max_length:
__snake_case :int = [[] for i in range(next_ncol )]
__snake_case :Optional[int] = self.root ** (next_ncol // 2)
__snake_case :List[str] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
__snake_case :List[str] = new_inverse_c
next_ncol *= 2
# Unpack
__snake_case :Optional[int] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ) -> List[str]:
'''simple docstring'''
__snake_case :List[Any] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
__snake_case :Tuple = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
__snake_case :Tuple = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 455
|
def UpperCamelCase ( snake_case__ : float ,snake_case__ : int ):
'''simple docstring'''
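# return the fractional part of the number, rounded to digit_amount places when digit_amount > 0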
if digit_amount > 0:
return round(number - int(snake_case__ ) ,snake_case__ )
return number - int(snake_case__ )
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 455
| 1
|
"""simple docstring"""
def _snake_case ( UpperCamelCase : int = 50 ):
UpperCAmelCase : Optional[Any] = [1] * (length + 1)
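# ways_number[k] counts the fillings of a row of length k with red blocks of length >= 3 separated by at least one black square (the Project Euler 114 setting)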
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 717
|
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
A: Union[str, Any] = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
A: int = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
A: Tuple = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
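# fitness is the number of positions at which the candidate string already matches the target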
UpperCAmelCase : Tuple = len([g for position, g in enumerate(UpperCamelCase ) if g == main_target[position]] )
return (item, float(UpperCamelCase ))
def _snake_case ( UpperCamelCase : str , UpperCamelCase : str ):
UpperCAmelCase : List[str] = random.randint(0 , len(UpperCamelCase ) - 1 )
UpperCAmelCase : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
UpperCAmelCase : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def _snake_case ( UpperCamelCase : str , UpperCamelCase : list[str] ):
UpperCAmelCase : str = list(UpperCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
UpperCAmelCase : int = random.choice(UpperCamelCase )
return "".join(UpperCamelCase )
def _snake_case ( UpperCamelCase : tuple[str, float] , UpperCamelCase : list[tuple[str, float]] , UpperCamelCase : list[str] , ):
UpperCAmelCase : Optional[Any] = []
# Generate more children proportionally to the fitness score.
UpperCAmelCase : Optional[Any] = int(parent_a[1] * 100 ) + 1
UpperCAmelCase : List[str] = 10 if child_n >= 10 else child_n
for _ in range(UpperCamelCase ):
UpperCAmelCase : List[str] = population_score[random.randint(0 , UpperCamelCase )][0]
UpperCAmelCase , UpperCAmelCase : Any = crossover(parent_a[0] , UpperCamelCase )
# Append new string to the population list.
pop.append(mutate(UpperCamelCase , UpperCamelCase ) )
pop.append(mutate(UpperCamelCase , UpperCamelCase ) )
return pop
def _snake_case ( UpperCamelCase : str , UpperCamelCase : list[str] , UpperCamelCase : bool = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
UpperCAmelCase : Dict = F"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(UpperCamelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
UpperCAmelCase : str = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
UpperCAmelCase : Optional[Any] = F"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(UpperCamelCase )
# Generate random starting population.
UpperCAmelCase : Optional[int] = []
for _ in range(UpperCamelCase ):
population.append("""""".join([random.choice(UpperCamelCase ) for i in range(len(UpperCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
UpperCAmelCase , UpperCAmelCase : Any = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(UpperCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
UpperCAmelCase : str = [evaluate(item , UpperCamelCase ) for item in population]
# Check if there is a matching evolution.
UpperCAmelCase : Union[str, Any] = sorted(UpperCamelCase , key=lambda x : x[1] , reverse=UpperCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"\nGeneration: {generation}"
F"\nTotal Population:{total_population}"
F"\nBest score: {population_score[0][1]}"
F"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
UpperCAmelCase : Tuple = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(UpperCamelCase )
# Normalize population score to be between 0 and 1.
UpperCAmelCase : List[str] = [
(item, score / len(UpperCamelCase )) for item, score in population_score
]
# This is selection
for i in range(UpperCamelCase ):
population.extend(select(population_score[int(i )] , UpperCamelCase , UpperCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(UpperCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
A: Union[str, Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
A: Dict = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
A , A , A: List[Any] = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
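# A minimal, self-contained sketch of the same evolve/select/mutate idea with
# readable names. TARGET, GENES, and POP_SIZE are illustrative values, not
# taken from the file above.
import random

TARGET = "HELLO WORLD"
GENES = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"
POP_SIZE = 200


def fitness(candidate: str) -> int:
    # Number of positions where the candidate already matches the target.
    return sum(c == t for c, t in zip(candidate, TARGET))


def mutate_one(candidate: str) -> str:
    # Replace a single random position with a random gene.
    i = random.randrange(len(candidate))
    return candidate[:i] + random.choice(GENES) + candidate[i + 1 :]


def evolve() -> int:
    population = ["".join(random.choice(GENES) for _ in TARGET) for _ in range(POP_SIZE)]
    generation = 0
    while True:
        generation += 1
        population.sort(key=fitness, reverse=True)
        if population[0] == TARGET:
            return generation
        # Keep the best third, refill the rest with mutated copies of survivors.
        survivors = population[: POP_SIZE // 3]
        children = [mutate_one(random.choice(survivors)) for _ in range(POP_SIZE - len(survivors))]
        population = survivors + children


print(f"Converged in {evolve()} generations")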
| 359
| 0
|
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
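# A quick sanity check of jump_search against the standard library's bisect
# module; the random probe data here is illustrative, not part of the original.
import bisect
import random

data = sorted(random.sample(range(1_000), 50))
for target in data + [-1, 10_000]:
    found = jump_search(data, target)
    pos = bisect.bisect_left(data, target)
    present = pos < len(data) and data[pos] == target
    assert (found != -1) == present
    if found != -1:
        assert data[found] == target
print("jump_search agrees with bisect_left on all probes")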
| 492
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
def __init__( self , A_ , A_=13 , A_=[30, 30] , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=8 , A_=10 , ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : List[Any] = batch_size
__lowerCAmelCase : List[Any] = image_size
__lowerCAmelCase : Tuple = patch_size
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : Tuple = is_training
__lowerCAmelCase : Optional[Any] = use_labels
__lowerCAmelCase : str = hidden_size
__lowerCAmelCase : Dict = num_hidden_layers
__lowerCAmelCase : Optional[Any] = num_attention_heads
__lowerCAmelCase : Optional[int] = intermediate_size
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Dict = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : Optional[Any] = type_sequence_label_size
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : List[Any] = num_labels
__lowerCAmelCase : str = scope
__lowerCAmelCase : Union[str, Any] = n_targets
__lowerCAmelCase : Tuple = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__lowerCAmelCase : List[Any] = num_patches + 1 + self.num_detection_tokens
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__lowerCAmelCase : str = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__lowerCAmelCase : Union[str, Any] = []
for i in range(self.batch_size ):
__lowerCAmelCase : int = {}
__lowerCAmelCase : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=A_ )
__lowerCAmelCase : List[Any] = torch.rand(self.n_targets , 4 , device=A_ )
labels.append(A_ )
__lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Dict = YolosModel(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Tuple = model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = YolosForObjectDetection(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : List[str] = model(pixel_values=A_ )
__lowerCAmelCase : List[Any] = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__lowerCAmelCase : str = model(pixel_values=A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : str = self.prepare_config_and_inputs()
__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase : Optional[int] = config_and_inputs
__lowerCAmelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_UpperCamelCase = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase__ ( self , A_ , A_ , A_=False ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__lowerCAmelCase : Union[str, Any] = []
for i in range(self.model_tester.batch_size ):
__lowerCAmelCase : List[str] = {}
__lowerCAmelCase : Optional[int] = torch.ones(
size=(self.model_tester.n_targets,) , device=A_ , dtype=torch.long )
__lowerCAmelCase : str = torch.ones(
self.model_tester.n_targets , 4 , device=A_ , dtype=torch.float )
labels.append(A_ )
__lowerCAmelCase : Union[str, Any] = labels
return inputs_dict
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = YolosModelTester(self )
__lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
pass
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class(A_ )
__lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Dict = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A_ )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase, __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : List[Any] = True
# in YOLOS, the seq_len is different
__lowerCAmelCase : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__lowerCAmelCase : str = True
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Dict = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : str = model(**self._prepare_for_class(A_ , A_ ) )
__lowerCAmelCase : Dict = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase : str = True
__lowerCAmelCase : Tuple = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(A_ , A_ ) )
__lowerCAmelCase : Any = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowerCAmelCase : List[str] = len(A_ )
# Check attention is always last and order is fine
__lowerCAmelCase : str = True
__lowerCAmelCase : Tuple = True
__lowerCAmelCase : Dict = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : str = model(**self._prepare_for_class(A_ , A_ ) )
__lowerCAmelCase : Optional[int] = 1
self.assertEqual(out_len + added_hidden_states , len(A_ ) )
__lowerCAmelCase : Optional[int] = outputs.attentions
self.assertEqual(len(A_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
__lowerCAmelCase : Optional[Any] = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(A_ , A_ ) )
__lowerCAmelCase : str = outputs.hidden_states
__lowerCAmelCase : List[Any] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) , A_ )
# YOLOS has a different seq_length
__lowerCAmelCase : List[str] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowerCAmelCase, __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Tuple = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : Dict = True
check_hidden_states_output(A_ , A_ , A_ )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*A_ )
@slow
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Union[str, Any] = YolosModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _lowercase ( ):
__lowerCAmelCase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(A_ )
__lowerCAmelCase : Optional[Any] = self.default_image_processor
__lowerCAmelCase : Optional[Any] = prepare_img()
__lowerCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : str = model(inputs.pixel_values )
# verify outputs
__lowerCAmelCase : Optional[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , A_ )
__lowerCAmelCase : Tuple = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=A_ , )
__lowerCAmelCase : Optional[int] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A_ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , A_ , atol=1e-4 ) )
# verify postprocessing
__lowerCAmelCase : Optional[Any] = image_processor.post_process_object_detection(
A_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__lowerCAmelCase : Tuple = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(A_ )
__lowerCAmelCase : Any = [75, 75, 17, 63, 17]
__lowerCAmelCase : Tuple = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(A_ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , A_ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , A_ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , A_ ) )
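# A hedged end-to-end sketch of the object-detection path exercised by the
# integration test above; the checkpoint and the COCO fixture path follow the
# test, while the 0.9 confidence threshold is an illustrative choice.
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale the detections to the original image size and keep confident ones.
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())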
| 492
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Any:
        A = torch.rand(12, dtype=torch.float64) - 0.5
A = AgentAudio(__UpperCamelCase )
A = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__UpperCamelCase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__UpperCamelCase ) )
# Ensure that the file contains the same value as the original tensor
A , A = sf.read(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , torch.tensor(__UpperCamelCase ) , atol=1e-4 ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
        A = torch.rand(12, dtype=torch.float64) - 0.5
A = get_new_path(suffix='.wav' )
sf.write(__UpperCamelCase , __UpperCamelCase , 16_000 )
A = AgentAudio(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , __UpperCamelCase )
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
A = torch.randint(0 , 256 , (64, 64, 3) )
A = AgentImage(__UpperCamelCase )
A = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__UpperCamelCase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__UpperCamelCase ) )
def __UpperCamelCase ( self : Any ) -> Any:
A = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
A = Image.open(__UpperCamelCase )
A = AgentImage(__UpperCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__UpperCamelCase ) )
def __UpperCamelCase ( self : List[Any] ) -> int:
A = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
A = Image.open(__UpperCamelCase )
A = AgentImage(__UpperCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__UpperCamelCase ) )
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
A = 'Hey!'
A = AgentText(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , agent_type.to_string() )
self.assertEqual(__UpperCamelCase , agent_type.to_raw() )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
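# A hedged round-trip sketch of the AgentAudio behaviour the tests above
# verify: wrap a waveform, serialize it to a .wav path, and load it back.
import torch
from transformers.tools.agent_types import AgentAudio

waveform = torch.rand(12, dtype=torch.float64) - 0.5
path = AgentAudio(waveform).to_string()   # writes the tensor to a temp .wav file
restored = AgentAudio(path).to_raw()      # reads the file back as a tensor
assert torch.allclose(waveform, restored, atol=1e-4)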
| 224
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__snake_case :Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]) -> torch.Tensor:
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]) -> torch.Tensor:
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet: UNet2DModel, scheduler: RePaintScheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
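# A hedged usage sketch for the pipeline above; the checkpoint name follows
# the RePaint example in the diffusers docs, while the local image/mask paths
# are illustrative assumptions.
import torch
from PIL import Image
from diffusers import RePaintPipeline, RePaintScheduler

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

original = Image.open("face.png").convert("RGB").resize((256, 256))
mask = Image.open("mask.png").convert("L").resize((256, 256))  # see _preprocess_mask for the thresholding

result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
    generator=torch.manual_seed(0),
).images[0]
result.save("inpainted.png")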
| 224
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
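# A cross-check of solution() against a simple sieve; the sieve helper is a
# sketch, not part of the original file. The 10001st prime is 104743.
def _sieve(limit: int) -> list[int]:
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if flags[i]:
            flags[i * i :: i] = [False] * len(flags[i * i :: i])
    return [i for i, is_p in enumerate(flags) if is_p]


assert solution(6) == 13
assert solution(10001) == _sieve(200_000)[10_000] == 104743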
| 658
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
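# A hedged usage sketch: build the config and pick backbone stages via
# out_features; the printed values rely on the BackboneConfigMixin behaviour.
config = FocalNetConfig(out_features=["stage1", "stage2"])
print(config.stage_names)   # expected: ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # expected: ['stage1', 'stage2']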
| 658
| 1
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(1_1)) = }""")
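# Sanity check (a sketch): there are exactly eleven two-sided truncatable
# primes, and their sum is 748317 (Project Euler problem 37).
truncated = compute_truncated_primes(11)
print(truncated)  # [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397]
assert sum(truncated) == 748317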
| 601
|
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the data only; the nodes themselves stay in place
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
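# A small extra check (a sketch): swapping with a value that is not in the
# list leaves it unchanged, because swap_nodes returns early.
demo = LinkedList()
for value in ("a", "b", "c"):
    demo.push(value)
demo.swap_nodes("a", "z")  # "z" is absent -> no-op
demo.print_list()          # c b a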
| 601
| 1
|
'''simple docstring'''
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs from the Giphy search API for a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
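# A hedged variant of get_gifs with a request timeout and basic error
# handling; reading the key from a GIPHY_API_KEY environment variable is an
# illustrative choice, not part of the original snippet.
import os
from typing import Optional


def get_gifs_safe(query: str, api_key: Optional[str] = None) -> list:
    api_key = api_key or os.environ.get("GIPHY_API_KEY", "")
    params = {"q": "+".join(query.split()), "api_key": api_key, "limit": 10}
    response = requests.get("https://api.giphy.com/v1/gifs/search", params=params, timeout=10)
    response.raise_for_status()  # fail loudly on HTTP errors instead of returning junk
    return [gif["url"] for gif in response.json()["data"]]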
| 11
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
SCREAMING_SNAKE_CASE_ : str = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604

    input_boxes = ((75, 275, 1_725, 850),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514

    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
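# A hedged inference sketch using an already-converted public checkpoint
# ("facebook/sam-vit-base" is assumed to be available on the Hub), showing the
# same point-prompt flow the assertions above exercise.
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = SamModel.from_pretrained("facebook/sam-vit-base")

image = Image.open("car.png").convert("RGB")  # illustrative local image
input_points = [[[400, 650]]]                 # one (x, y) prompt per image
inputs = processor(images=image, input_points=input_points, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
print(outputs.iou_scores.shape, masks[0].shape)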
| 375
| 0
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, input the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")

    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
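# Note: the script above discards its MinMaxScaler, so the predictions stay in
# the scaled [0, 1] range. A hedged sketch of keeping the scaler so outputs
# can be mapped back to the original units:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
scaled = scaler.fit_transform(np.array([[10.0], [20.0], [30.0]]))
print(scaler.inverse_transform(scaled).ravel())  # [10. 20. 30.]
# e.g. restored_pred = scaler.inverse_transform(pred.reshape(-1, 1))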
| 713
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
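# A hedged usage sketch: instantiate a small encoder from this config; the
# class names follow the transformers bert-generation module and the tiny
# hyperparameters are illustrative.
from transformers import BertGenerationConfig, BertGenerationEncoder

config = BertGenerationConfig(
    vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128
)
model = BertGenerationEncoder(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")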
| 256
| 0
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
main()
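# A quick hedged demo: on highly repetitive text the second-order entropy is
# barely above the first-order entropy; the sample string is illustrative.
sample_text = "the quick brown fox jumps over the lazy dog " * 20
calculate_prob(sample_text)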
| 398
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
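# A minimal sketch of the lazy-import idea used above: resolve a heavy
# attribute only on first access instead of at import time. LazyAttr is an
# illustrative stand-in, not the transformers _LazyModule implementation.
import importlib


class LazyAttr:
    def __init__(self, module_name: str, attr_name: str) -> None:
        self._module_name = module_name
        self._attr_name = attr_name
        self._value = None

    def resolve(self):
        # The underlying module is imported only when the value is needed.
        if self._value is None:
            module = importlib.import_module(self._module_name)
            self._value = getattr(module, self._attr_name)
        return self._value


lazy_sqrt = LazyAttr("math", "sqrt")
print(lazy_sqrt.resolve()(2.0))  # math is imported here, not before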
| 377
| 0
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
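# A hedged usage sketch of the public API re-exported by this __init__; the
# dataset name and split syntax are illustrative.
from datasets import load_dataset

dataset = load_dataset("imdb", split="train[:1%]")
print(len(dataset), dataset.column_names)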
| 703
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
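# Known values of the Mobius function as a sanity check (a sketch):
# mu(1)=1, mu(2)=-1, mu(4)=0, mu(6)=1, mu(12)=0.
for n, expected in [(1, 1), (2, -1), (4, 0), (6, 1), (12, 0)]:
    assert mobius(n) == expected, (n, mobius(n))
print("mobius values check out")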
| 171
| 0
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__lowerCAmelCase : Tuple = False
class a_ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt="""first prompt""" , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
            lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(snake_case__, torch_dtype=torch.float16)
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt="""first prompt""" , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _SCREAMING_SNAKE_CASE ( self : str ):
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase__ = """cyberpunk 2077"""
lowerCAmelCase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=snake_case__ , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
lowerCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCAmelCase__ = """A painting of a squirrel eating a burger """
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
lowerCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCAmelCase__ = pipe.image_variation(snake_case__ , generator=snake_case__ , output_type="""numpy""" ).images
lowerCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
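# A hedged usage sketch of the two pipeline modes exercised above
# (dual-guided and text-to-image); requires a CUDA device and network access.
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
)
generator = torch.manual_seed(0)
dual = pipe.dual_guided(
    prompt="cyberpunk 2077", image=image, text_to_image_strength=0.75,
    generator=generator, guidance_scale=7.5, num_inference_steps=50,
).images[0]
dual.save("dual_guided.png")
text = pipe.text_to_image(
    prompt="A painting of a squirrel eating a burger",
    generator=generator, guidance_scale=7.5, num_inference_steps=50,
).images[0]
text.save("text_to_image.png")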
| 644
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None

        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
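# A small hedged usage demo of the fixed-capacity circular queue above.
queue = CircularQueueLinkedList(initial_capacity=3)
queue.enqueue("a")
queue.enqueue("b")
print(queue.first())    # a
print(queue.dequeue())  # a
print(queue.dequeue())  # b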
| 644
| 1
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any , __magic_name__ : List[str]="shi-labs/oneformer_demo" ) -> List[str]:
"""simple docstring"""
with open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) as f:
UpperCamelCase :List[str] = json.load(__magic_name__ )
UpperCamelCase :int = {}
UpperCamelCase :Any = []
UpperCamelCase :Any = []
for key, info in class_info.items():
UpperCamelCase :Optional[int] = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(__magic_name__ ) )
UpperCamelCase :List[Any] = thing_ids
UpperCamelCase :str = class_names
return metadata
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Optional[Any]=30 , __lowerCamelCase : int=400 , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=10 , __lowerCamelCase : Tuple=False , __lowerCamelCase : int=255 , __lowerCamelCase : Optional[Any]="shi-labs/oneformer_demo" , __lowerCamelCase : str="ade20k_panoptic.json" , __lowerCamelCase : Dict=10 , ):
UpperCamelCase :Optional[Any] = parent
UpperCamelCase :Optional[int] = batch_size
UpperCamelCase :List[str] = num_channels
UpperCamelCase :List[Any] = min_resolution
UpperCamelCase :Optional[int] = max_resolution
UpperCamelCase :str = do_resize
UpperCamelCase :Optional[Any] = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
UpperCamelCase :Optional[Any] = do_normalize
UpperCamelCase :Dict = image_mean
UpperCamelCase :Any = image_std
UpperCamelCase :Optional[int] = class_info_file
UpperCamelCase :Optional[Any] = prepare_metadata(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Tuple = num_text
UpperCamelCase :Tuple = repo_path
# for the post_process_functions
UpperCamelCase :Union[str, Any] = 2
UpperCamelCase :Dict = 10
UpperCamelCase :Optional[Any] = 10
UpperCamelCase :int = 3
UpperCamelCase :Optional[Any] = 4
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :Any = do_reduce_labels
UpperCamelCase :Dict = ignore_index
def _A ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def _A ( self : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any]=False ):
if not batched:
UpperCamelCase :int = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
UpperCamelCase , UpperCamelCase :int = image.size
else:
UpperCamelCase , UpperCamelCase :Any = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase :Optional[int] = int(self.size["""shortest_edge"""] * h / w )
UpperCamelCase :Union[str, Any] = self.size["""shortest_edge"""]
elif w > h:
UpperCamelCase :int = self.size["""shortest_edge"""]
UpperCamelCase :Union[str, Any] = int(self.size["""shortest_edge"""] * w / h )
else:
UpperCamelCase :Any = self.size["""shortest_edge"""]
UpperCamelCase :List[str] = self.size["""shortest_edge"""]
else:
UpperCamelCase :Optional[int] = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase :List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase :Any = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
UpperCamelCase :Optional[int] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
def _A ( self : Optional[int] ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : Dict = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
snake_case__ : str = image_processing_class
def _A ( self : Optional[Any] ):
UpperCamelCase :Optional[Any] = OneFormerImageProcessorTester(self )
@property
def _A ( self : Union[str, Any] ):
return self.image_processing_tester.prepare_image_processor_dict()
def _A ( self : List[Any] ):
UpperCamelCase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """ignore_index""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """class_info_file""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """num_text""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """repo_path""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """metadata""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_reduce_labels""" ) )
def _A ( self : List[Any] ):
pass
def _A ( self : Optional[int] ):
# Initialize image_processor
UpperCamelCase :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase :str = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase :Union[str, Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Optional[int] = self.image_processing_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase :Dict = self.image_processing_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = image_processor(
__lowerCamelCase , ["""semantic"""] * len(__lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self : Optional[int] ):
# Initialize image_processor
UpperCamelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase :List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase :Union[str, Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Union[str, Any] = self.image_processing_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase :Any = self.image_processing_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
UpperCamelCase :Any = image_processor(
__lowerCamelCase , ["""semantic"""] * len(__lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self : Tuple ):
# Initialize image_processor
UpperCamelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase :Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase :List[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :List[str] = self.image_processing_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase , UpperCamelCase :Optional[int] = self.image_processing_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
UpperCamelCase :List[Any] = image_processor(
__lowerCamelCase , ["""semantic"""] * len(__lowerCamelCase ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(
        self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ["""semantic"""] * len(image_inputs ) , annotations , return_tensors="""pt""" , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def _A ( self : str ):
pass
def _A ( self : Dict ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs["""mask_labels"""]
            class_labels = inputs["""class_labels"""]
            pixel_values = inputs["""pixel_values"""]
            text_inputs = inputs["""text_inputs"""]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="""pil""" )
        common(is_instance_map=True , segmentation_type="""pil""" )
def _A ( self : List[str] ):
        # mask layout reconstructed to match the assertions below: two runs of
        # ones in the row-major flattened mask — the first starts at 1-based
        # position 21 and is 45 pixels long (row 0, cols 20-49, running into
        # row 1, cols 0-14), the second sits on row 5
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
def _A ( self : Union[str, Any] ):
UpperCamelCase :Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCamelCase :Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCamelCase :Union[str, Any] = image_processor.post_process_semantic_segmentation(__lowerCamelCase )
self.assertEqual(len(__lowerCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCamelCase :Any = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCamelCase :int = image_processor.post_process_semantic_segmentation(__lowerCamelCase , target_sizes=__lowerCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _A ( self : Optional[int] ):
UpperCamelCase :int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCamelCase :int = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase :Any = image_processor.post_process_instance_segmentation(__lowerCamelCase , threshold=0 )
self.assertTrue(len(__lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , __lowerCamelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def _A ( self : Dict ):
UpperCamelCase :str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
UpperCamelCase :Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCamelCase :Optional[Any] = image_processor.post_process_panoptic_segmentation(__lowerCamelCase , threshold=0 )
self.assertTrue(len(__lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , __lowerCamelCase )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the
    month during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday in the range
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
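

# Added sanity check (a sketch, not part of the original solution): recompute
# the same count with the standard library. datetime.date.weekday() returns 6
# for Sunday, so both approaches should agree on the known answer, 171.
def _cross_check_with_datetime() -> bool:
    import datetime

    count = sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6
    )
    return count == solution()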
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A:
"""simple docstring"""
def __init__( self , _A , _A=13 , _A=[30, 30] , _A=2 , _A=3 , _A=True , _A=True , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=10 , _A=0.0_2 , _A=3 , _A=None , _A=8 , _A=10 , ):
__A : Optional[int] = parent
__A : int = batch_size
__A : int = image_size
__A : Union[str, Any] = patch_size
__A : Dict = num_channels
__A : Tuple = is_training
__A : int = use_labels
__A : Union[str, Any] = hidden_size
__A : Optional[int] = num_hidden_layers
__A : Optional[Any] = num_attention_heads
__A : int = intermediate_size
__A : List[str] = hidden_act
__A : int = hidden_dropout_prob
__A : Any = attention_probs_dropout_prob
__A : Union[str, Any] = type_sequence_label_size
__A : Dict = initializer_range
__A : str = num_labels
__A : List[str] = scope
__A : Tuple = n_targets
__A : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__A : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__A : Optional[int] = num_patches + 1 + self.num_detection_tokens
def UpperCAmelCase_ ( self ):
__A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__A : Union[str, Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__A : Dict = []
for i in range(self.batch_size ):
__A : Optional[int] = {}
__A : Any = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_A )
__A : List[str] = torch.rand(self.n_targets , 4 , device=_A )
labels.append(_A )
__A : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Union[str, Any] = YolosModel(config=_A )
model.to(_A )
model.eval()
__A : str = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = YolosForObjectDetection(_A )
model.to(_A )
model.eval()
__A : int = model(pixel_values=_A )
__A : List[str] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__A : List[Any] = model(pixel_values=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.prepare_config_and_inputs()
__A , __A , __A : List[Any] = config_and_inputs
__A : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _A( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCamelCase : List[Any] = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
UpperCamelCase : Tuple = False
UpperCamelCase : Any = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : Optional[Any] = False
def UpperCAmelCase_ ( self , _A , _A , _A=False ):
__A : Optional[int] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : int = []
for i in range(self.model_tester.batch_size ):
__A : List[str] = {}
__A : Union[str, Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long )
__A : List[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_A , dtype=torch.float )
labels.append(_A )
__A : Union[str, Any] = labels
return inputs_dict
def UpperCAmelCase_ ( self ):
__A : List[Any] = YolosModelTester(self )
__A : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
# YOLOS does not use inputs_embeds
pass
def UpperCAmelCase_ ( self ):
__A , __A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : str = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def UpperCAmelCase_ ( self ):
__A , __A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Any = model_class(_A )
__A : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Optional[Any] = [*signature.parameters.keys()]
__A : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self ):
__A , __A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = True
# in YOLOS, the seq_len is different
__A : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : List[str] = True
__A : Optional[Any] = False
__A : Optional[Any] = True
__A : Dict = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
__A : Tuple = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : Optional[int] = True
__A : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : int = model(**self._prepare_for_class(_A , _A ) )
__A : Dict = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : List[Any] = len(_A )
# Check attention is always last and order is fine
__A : Optional[int] = True
__A : Dict = True
__A : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
__A : Dict = 1
self.assertEqual(out_len + added_hidden_states , len(_A ) )
__A : str = outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase_ ( self ):
def check_hidden_states_output(_A , _A , _A ):
__A : List[Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_A , _A ) )
__A : Optional[Any] = outputs.hidden_states
__A : int = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
# YOLOS has a different seq_length
__A : Optional[Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__A , __A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : int = True
check_hidden_states_output(_A , _A , _A )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_A )
@slow
def UpperCAmelCase_ ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Dict = YolosModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _SCREAMING_SNAKE_CASE ( ) -> str:
__A : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ):
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
__A : int = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(_A )
__A : int = self.default_image_processor
__A : List[str] = prepare_img()
__A : Optional[Any] = image_processor(images=_A , return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
__A : Union[str, Any] = model(inputs.pixel_values )
# verify outputs
__A : Union[str, Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , _A )
__A : str = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=_A , )
__A : List[Any] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1e-4 ) )
# verify postprocessing
__A : Tuple = image_processor.post_process_object_detection(
_A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__A : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(_A )
__A : int = [75, 75, 17, 63, 17]
__A : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(_A )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , _A , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , _A )
self.assertTrue(torch.allclose(results['boxes'][0, :] , _A ) )
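
# Note (added): the integration test above doubles as a usage recipe — load
# YolosForObjectDetection plus its image processor, run a forward pass, then
# call post_process_object_detection(outputs, threshold=..., target_sizes=
# [(height, width)]). Each returned dict carries "scores", "labels" and
# "boxes", with boxes as absolute (x_min, y_min, x_max, y_max) pixel
# coordinates in the original image.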
def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
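

# Quick sanity checks (added): gnome sort runs in O(n^2), sorts in place, and
# returns the very list object it was given.
assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert gnome_sort([]) == []
assert gnome_sort(["b", "a"]) == ["a", "b"]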
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__A : Optional[Any] = get_tests_dir("""fixtures""")
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = mock.Mock()
lowerCamelCase__ = 500
lowerCamelCase__ = {}
lowerCamelCase__ = HTTPError
lowerCamelCase__ = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__lowerCamelCase ) as mock_head:
lowerCamelCase__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def a__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def a__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(__lowerCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
lowerCamelCase__ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__lowerCamelCase )
@is_staging_test
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ ( cls : List[str] ) -> Any:
'''simple docstring'''
lowerCamelCase__ = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def a__ ( cls : Optional[Any] ) -> Dict:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def a__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = ViTImageProcessor.from_pretrained(__lowerCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
lowerCamelCase__ = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__lowerCamelCase , repo_id="test-image-processor" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
lowerCamelCase__ = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def a__ ( self : int ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = ViTImageProcessor.from_pretrained(__lowerCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
lowerCamelCase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
lowerCamelCase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
def a__ ( self : Dict ) -> str:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
lowerCamelCase__ = CustomImageProcessor.from_pretrained(__lowerCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
lowerCamelCase__ = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
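
# Note (added): the calls above pass use_auth_token=self._token; recent
# transformers releases deprecate that argument in favour of token=, so a
# modernised call would read, for example:
#   image_processor.push_to_hub("test-image-processor", token=self._token)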
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=False , **__lowerCamelCase : Dict ) -> str:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = d_embed
lowerCamelCase__ = d_proj
lowerCamelCase__ = cutoffs + [vocab_size]
lowerCamelCase__ = [0] + self.cutoffs
lowerCamelCase__ = div_val
lowerCamelCase__ = self.cutoffs[0]
lowerCamelCase__ = len(self.cutoffs ) - 1
lowerCamelCase__ = self.shortlist_size + self.n_clusters
lowerCamelCase__ = keep_order
lowerCamelCase__ = []
lowerCamelCase__ = []
def a__ ( self : Optional[int] , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
if self.n_clusters > 0:
lowerCamelCase__ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_weight" )
lowerCamelCase__ = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=__lowerCamelCase , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowerCamelCase__ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' , )
self.out_projs.append(__lowerCamelCase )
else:
self.out_projs.append(__lowerCamelCase )
lowerCamelCase__ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
lowerCamelCase__ = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCamelCase__ = self.d_embed // (self.div_val**i)
lowerCamelCase__ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_projs_._{i}''' )
self.out_projs.append(__lowerCamelCase )
lowerCamelCase__ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._weight''' , )
lowerCamelCase__ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__lowerCamelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__lowerCamelCase )
@staticmethod
def a__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=None ) -> str:
'''simple docstring'''
lowerCamelCase__ = x
if proj is not None:
lowerCamelCase__ = tf.einsum("ibd,ed->ibe" , __lowerCamelCase , __lowerCamelCase )
return tf.einsum("ibd,nd->ibn" , __lowerCamelCase , __lowerCamelCase ) + b
@staticmethod
def a__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = shape_list(__lowerCamelCase )
lowerCamelCase__ = tf.range(lp_size[0] , dtype=target.dtype )
lowerCamelCase__ = tf.stack([r, target] , 1 )
return tf.gather_nd(__lowerCamelCase , __lowerCamelCase )
def a__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : str=True , __lowerCamelCase : Tuple=False ) -> int:
'''simple docstring'''
lowerCamelCase__ = 0
if self.n_clusters == 0:
lowerCamelCase__ = self._logit(__lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowerCamelCase__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowerCamelCase , logits=__lowerCamelCase )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase , axis=-1 )
else:
lowerCamelCase__ = shape_list(__lowerCamelCase )
lowerCamelCase__ = []
lowerCamelCase__ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowerCamelCase__ , lowerCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowerCamelCase__ = (target >= l_idx) & (target < r_idx)
lowerCamelCase__ = tf.where(__lowerCamelCase )
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase ) - l_idx
if self.div_val == 1:
lowerCamelCase__ = self.out_layers[0][0][l_idx:r_idx]
lowerCamelCase__ = self.out_layers[0][1][l_idx:r_idx]
else:
lowerCamelCase__ = self.out_layers[i][0]
lowerCamelCase__ = self.out_layers[i][1]
if i == 0:
lowerCamelCase__ = tf.concat([cur_W, self.cluster_weight] , 0 )
lowerCamelCase__ = tf.concat([cur_b, self.cluster_bias] , 0 )
lowerCamelCase__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[0] )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ = self._logit(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , self.out_projs[i] )
lowerCamelCase__ = tf.nn.log_softmax(__lowerCamelCase )
lowerCamelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowerCamelCase__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__lowerCamelCase )
if target is not None:
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = tf.boolean_mask(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self._gather_logprob(__lowerCamelCase , __lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__lowerCamelCase , -cur_logprob , shape_list(__lowerCamelCase ) )
lowerCamelCase__ = tf.concat(__lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
lowerCamelCase__ = tf.reduce_mean(__lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__lowerCamelCase , name=self.name , aggregation="mean" if return_mean else "" )
return out
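

# Hypothetical construction (added sketch): this layer mirrors the TF
# Transformer-XL adaptive softmax, so a Transformer-XL-sized instantiation
# would look like the following — treat the class name and every number as
# assumptions, not values taken from this file. cutoffs split the vocabulary
# into frequency clusters and div_val shrinks the embedding size per cluster.
#
# adaptive_softmax = TFAdaptiveSoftmaxMask(
#     vocab_size=267735, d_embed=1024, d_proj=1024,
#     cutoffs=[20000, 40000, 200000], div_val=4,
# )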
import copy
import re
class UpperCAmelCase_ :
__lowerCamelCase = 'hp'
__lowerCamelCase = {}
__lowerCamelCase = None
@classmethod
def __UpperCAmelCase ( cls , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : List[str] = prefix
UpperCAmelCase__ : Tuple = defaults
cls.build_naming_info()
@staticmethod
def __UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
if len(_lowerCAmelCase ) == 0:
return ""
UpperCAmelCase__ : int = None
if any(char.isdigit() for char in word ):
raise Exception(f"Parameters should not contain numbers: '{word}' contains a number" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(_lowerCAmelCase ) + 1 ):
UpperCAmelCase__ : List[str] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase__ : List[str] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = """"""
while integer != 0:
UpperCAmelCase__ : List[Any] = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase__ : List[str] = 0
while True:
UpperCAmelCase__ : List[str] = word + """#""" + int_to_alphabetic(_lowerCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase__ : List[str] = sword
break
UpperCAmelCase__ : str = short_word
UpperCAmelCase__ : Tuple = word
return short_word
@staticmethod
def __UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : Dict = param_name.split("""_""" )
UpperCAmelCase__ : str = [TrialShortNamer.shortname_for_word(_lowerCAmelCase , _lowerCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCAmelCase__ : Union[str, Any] = ["""""", """_"""]
for separator in separators:
UpperCAmelCase__ : Optional[Any] = separator.join(_lowerCAmelCase )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase__ : Any = shortname
UpperCAmelCase__ : Dict = param_name
return shortname
return param_name
@staticmethod
def __UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : Optional[int] = TrialShortNamer.shortname_for_key(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Tuple = short_name
UpperCAmelCase__ : Union[str, Any] = param_name
@classmethod
def __UpperCAmelCase ( cls ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase__ : str = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
UpperCAmelCase__ : List[str] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : int = info
@classmethod
def __UpperCAmelCase ( cls , _lowerCAmelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase__ : Optional[Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase__ : int = cls.NAMING_INFO["""short_param"""][k]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ : Tuple = 1 if v else 0
UpperCAmelCase__ : int = """""" if isinstance(_lowerCAmelCase , (int, float) ) else """-"""
UpperCAmelCase__ : Union[str, Any] = f"{key}{sep}{v}"
name.append(_lowerCAmelCase )
return "_".join(_lowerCAmelCase )
@classmethod
def __UpperCAmelCase ( cls , _lowerCAmelCase ):
UpperCAmelCase__ : Union[str, Any] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase__ : Union[str, Any] = []
else:
UpperCAmelCase__ : List[Any] = repr.split("""_""" )
UpperCAmelCase__ : Union[str, Any] = {}
for value in values:
if "-" in value:
UpperCAmelCase__ , UpperCAmelCase__ : int = value.split("""-""" )
else:
UpperCAmelCase__ : int = re.sub("""[0-9.]""" , """""" , _lowerCAmelCase )
UpperCAmelCase__ : int = float(re.sub("""[^0-9.]""" , """""" , _lowerCAmelCase ) )
UpperCAmelCase__ : Any = cls.NAMING_INFO["""reverse_short_param"""][p_k]
UpperCAmelCase__ : Optional[Any] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase__ : Dict = cls.DEFAULTS[k]
return parameters
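

# Illustrative subclass (added; the classmethod names `shortname` and
# `parse_repr` are inferred from the transformers utility this class comes
# from — the method definitions above are name-obfuscated):
#
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
# RunNamer.shortname({"learning_rate": 3e-4, "batch_size": 32})
#   -> "run_lr0.0003"   (defaults are omitted; numeric values get no separator)
# RunNamer.parse_repr("run_lr0.0003")
#   -> {"learning_rate": 0.0003, "batch_size": 32}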
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
lowercase_ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
_SCREAMING_SNAKE_CASE = Github(os.environ["""GITHUB_TOKEN"""] )
_SCREAMING_SNAKE_CASE = g.get_repo("""huggingface/diffusers""" )
_SCREAMING_SNAKE_CASE = repo.get_issues(state="""open""" )
for issue in open_issues:
        _SCREAMING_SNAKE_CASE = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
_SCREAMING_SNAKE_CASE = comments[0] if len(__lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
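
# Running this locally (added note): the script needs only a GITHUB_TOKEN
# environment variable whose token has write access to the repository's
# issues, e.g. (the file name here is hypothetical):
#   GITHUB_TOKEN=<personal-access-token> python close_stale_issues.py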
"""simple docstring"""
from __future__ import annotations
import math
def A__ ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if len(_UpperCAmelCase ) != 2 or len(a[0] ) != 2 or len(_UpperCAmelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
snake_case__ : Optional[Any] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def A__ ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> Tuple:
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_UpperCAmelCase ) )
]
def A__ ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> Optional[int]:
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_UpperCAmelCase ) )
]
def A__ ( _UpperCAmelCase : list ) -> tuple[list, list, list, list]:
'''simple docstring'''
if len(_UpperCAmelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
snake_case__ : Dict = len(_UpperCAmelCase )
snake_case__ : List[str] = matrix_length // 2
snake_case__ : List[str] = [[a[i][j] for j in range(_UpperCAmelCase , _UpperCAmelCase )] for i in range(_UpperCAmelCase )]
snake_case__ : Optional[Any] = [
[a[i][j] for j in range(_UpperCAmelCase , _UpperCAmelCase )] for i in range(_UpperCAmelCase , _UpperCAmelCase )
]
snake_case__ : Union[str, Any] = [[a[i][j] for j in range(_UpperCAmelCase )] for i in range(_UpperCAmelCase )]
snake_case__ : List[str] = [[a[i][j] for j in range(_UpperCAmelCase )] for i in range(_UpperCAmelCase , _UpperCAmelCase )]
return top_left, top_right, bot_left, bot_right
def A__ ( _UpperCAmelCase : list ) -> tuple[int, int]:
'''simple docstring'''
return len(_UpperCAmelCase ), len(matrix[0] )
def A__ ( _UpperCAmelCase : list ) -> None:
'''simple docstring'''
print("\n".join(str(_UpperCAmelCase ) for line in matrix ) )
def A__ ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if matrix_dimensions(_UpperCAmelCase ) == (2, 2):
return default_matrix_multiplication(_UpperCAmelCase , _UpperCAmelCase )
snake_case__ : int = split_matrix(_UpperCAmelCase )
snake_case__ : str = split_matrix(_UpperCAmelCase )
snake_case__ : Optional[Any] = actual_strassen(_UpperCAmelCase , matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ : Optional[int] = actual_strassen(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
snake_case__ : List[str] = actual_strassen(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
snake_case__ : Union[str, Any] = actual_strassen(_UpperCAmelCase , matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ : Union[str, Any] = actual_strassen(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ : Optional[Any] = actual_strassen(matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) , matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ : Dict = actual_strassen(matrix_subtraction(_UpperCAmelCase , _UpperCAmelCase ) , matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case__ : int = matrix_addition(matrix_subtraction(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) , _UpperCAmelCase )
snake_case__ : Union[str, Any] = matrix_addition(_UpperCAmelCase , _UpperCAmelCase )
snake_case__ : Optional[int] = matrix_addition(_UpperCAmelCase , _UpperCAmelCase )
snake_case__ : int = matrix_subtraction(matrix_subtraction(matrix_addition(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ) , _UpperCAmelCase )
# construct the new matrix from our 4 quadrants
snake_case__ : Tuple = []
for i in range(len(_UpperCAmelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_UpperCAmelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def A__ ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
'''simple docstring'''
if matrix_dimensions(_UpperCAmelCase )[1] != matrix_dimensions(_UpperCAmelCase )[0]:
snake_case__ : Union[str, Any] = (
"Unable to multiply these matrices, please check the dimensions.\n"
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(_UpperCAmelCase )
snake_case__ : Tuple = matrix_dimensions(_UpperCAmelCase )
snake_case__ : Any = matrix_dimensions(_UpperCAmelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
snake_case__ : Any = max(*_UpperCAmelCase , *_UpperCAmelCase )
snake_case__ : Tuple = int(math.pow(2 , math.ceil(math.loga(_UpperCAmelCase ) ) ) )
snake_case__ : Optional[Any] = matrixa
snake_case__ : List[Any] = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _UpperCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _UpperCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _UpperCAmelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
snake_case__ : str = actual_strassen(_UpperCAmelCase , _UpperCAmelCase )
# Removing the additional zeros
for i in range(0 , _UpperCAmelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _UpperCAmelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowercase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowercase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
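

# Added cross-check (a sketch): the result should equal the plain matrix
# product. Note strassen() pads its input lists in place, hence the deepcopy.
#
# import copy
# import numpy as np
# expected = np.array(matrix1) @ np.array(matrix2)
# got = np.array(strassen(copy.deepcopy(matrix1), copy.deepcopy(matrix2)))
# assert (got == expected).all()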
"""simple docstring"""
import math
import unittest
def A__ ( _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
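

# Why stepping by 6 suffices (added note): every prime p > 3 satisfies
# p % 6 in {1, 5}, since the other residues make p divisible by 2 or 3.
# Testing i and i + 2 for i = 5, 11, 17, ... therefore covers every candidate
# divisor up to sqrt(number).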
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase):
'''simple docstring'''
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.assertTrue(is_prime(2))
self.assertTrue(is_prime(3))
self.assertTrue(is_prime(5))
self.assertTrue(is_prime(7))
self.assertTrue(is_prime(11))
self.assertTrue(is_prime(13))
self.assertTrue(is_prime(17))
self.assertTrue(is_prime(19))
self.assertTrue(is_prime(23))
self.assertTrue(is_prime(29))
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
        with self.assertRaises(AssertionError):
is_prime(-19)
self.assertFalse(
is_prime(0) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2))
self.assertFalse(is_prime(2 * 3))
self.assertFalse(is_prime(3 * 3))
self.assertFalse(is_prime(3 * 5))
self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    """Return the complementary strand of a DNA sequence.

    >>> dna_complement("ATCG")
    'TAGC'
    >>> dna_complement("GCTA")
    'CGAT'
    """
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )

    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
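
# Added example: a reverse complement combines this helper with string
# reversal, e.g. dna_complement("ATCG")[::-1] == "CGAT".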
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= 42
A__= 42
def __init__( self : Tuple , _lowercase : UNetaDModel , _lowercase : ScoreSdeVeScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase )
@torch.no_grad()
def __call__( self : Dict , _lowercase : int = 1 , _lowercase : int = 20_00 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , **_lowercase : Any , ):
"""simple docstring"""
UpperCAmelCase__ = self.unet.config.sample_size
UpperCAmelCase__ = (batch_size, 3, img_size, img_size)
UpperCAmelCase__ = self.unet
UpperCAmelCase__ = randn_tensor(_lowercase , generator=_lowercase ) * self.scheduler.init_noise_sigma
UpperCAmelCase__ = sample.to(self.device )
self.scheduler.set_timesteps(_lowercase )
self.scheduler.set_sigmas(_lowercase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase__ = self.unet(_lowercase , _lowercase ).sample
UpperCAmelCase__ = self.scheduler.step_correct(_lowercase , _lowercase , generator=_lowercase ).prev_sample
# prediction step
UpperCAmelCase__ = model(_lowercase , _lowercase ).sample
UpperCAmelCase__ = self.scheduler.step_pred(_lowercase , _lowercase , _lowercase , generator=_lowercase )
UpperCAmelCase__ , UpperCAmelCase__ = output.prev_sample, output.prev_sample_mean
UpperCAmelCase__ = sample_mean.clamp(0 , 1 )
UpperCAmelCase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_lowercase )
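

# Hypothetical usage (added sketch): this pipeline corresponds to diffusers'
# ScoreSdeVePipeline; the checkpoint id below is an assumption — any
# UNet2DModel + ScoreSdeVeScheduler pair works.
#
# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-ffhq-1024")
# image = pipe(batch_size=1, num_inference_steps=2000).images[0]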
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Optional[int] = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = "switch_transformers"
__lowerCamelCase : List[str] = ["past_key_values"]
__lowerCamelCase : Union[str, Any] = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self, lowerCamelCase__=3_2128, lowerCamelCase__=768, lowerCamelCase__=64, lowerCamelCase__=2048, lowerCamelCase__=64, lowerCamelCase__=12, lowerCamelCase__=3, lowerCamelCase__=12, lowerCamelCase__=3, lowerCamelCase__=12, lowerCamelCase__=8, lowerCamelCase__=False, lowerCamelCase__=0.01, lowerCamelCase__="float32", lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=128, lowerCamelCase__=0.1, lowerCamelCase__=1e-6, lowerCamelCase__=0.001, lowerCamelCase__=0.001, lowerCamelCase__=1.0, lowerCamelCase__="relu", lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=0, lowerCamelCase__=1, **lowerCamelCase__, ):
A : List[str] = vocab_size
A : Dict = d_model
A : str = d_kv
A : Dict = d_ff
A : Optional[int] = num_sparse_encoder_layers
A : Union[str, Any] = num_layers
A : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A : List[str] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
A : Dict = self.num_layers // self.num_sparse_encoder_layers
else:
A : List[Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
A : Dict = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
A : Optional[int] = self.num_decoder_layers # HACK: this will create 0 sparse layers
A : List[str] = num_heads
A : Dict = num_experts
A : int = expert_capacity
A : int = router_bias
A : Any = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
A : List[str] = router_dtype
A : Union[str, Any] = router_ignore_padding_tokens
A : Dict = relative_attention_num_buckets
A : Optional[Any] = relative_attention_max_distance
A : int = dropout_rate
A : Optional[Any] = layer_norm_epsilon
A : Any = initializer_factor
A : int = feed_forward_proj
A : str = use_cache
A : str = add_router_probs
A : Union[str, Any] = router_z_loss_coef
A : List[Any] = router_aux_loss_coef
A : Dict = self.feed_forward_proj.split("""-""" )
A : Optional[Any] = act_info[-1]
A : Optional[int] = act_info[0] == """gated"""
if len(lowerCamelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCamelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A : Tuple = """gelu_new"""
super().__init__(
pad_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, is_encoder_decoder=lowerCamelCase__, **lowerCamelCase__, )
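

# Usage sketch (added; the class corresponds to transformers'
# SwitchTransformersConfig, inferred from model_type="switch_transformers"):
#
# from transformers import SwitchTransformersConfig
# config = SwitchTransformersConfig(num_experts=8)  # defaults mirror google/switch-base-8
# config.num_layers                  # 12
# config.num_sparse_encoder_layers   # 3 -> one sparse MoE block every 12 // 3 = 4 layers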
from __future__ import annotations
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , ) -> tuple[int, float, str]:
"""simple docstring"""
A : str = cipher_alphabet or [chr(_lowerCAmelCase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
A : List[Any] = {
"""a""": 0.08_497,
"""b""": 0.01_492,
"""c""": 0.02_202,
"""d""": 0.04_253,
"""e""": 0.11_162,
"""f""": 0.02_228,
"""g""": 0.02_015,
"""h""": 0.06_094,
"""i""": 0.07_546,
"""j""": 0.00_153,
"""k""": 0.01_292,
"""l""": 0.04_025,
"""m""": 0.02_406,
"""n""": 0.06_749,
"""o""": 0.07_507,
"""p""": 0.01_929,
"""q""": 0.00_095,
"""r""": 0.07_587,
"""s""": 0.06_327,
"""t""": 0.09_356,
"""u""": 0.02_758,
"""v""": 0.00_978,
"""w""": 0.02_560,
"""x""": 0.00_150,
"""y""": 0.01_994,
"""z""": 0.00_077,
}
else:
# Custom frequencies dictionary
A : int = frequencies_dict
if not case_sensitive:
A : int = ciphertext.lower()
# Chi squared statistic values
A : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_lowerCAmelCase ) ):
A : List[str] = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
A : Optional[Any] = (alphabet_letters.index(letter.lower() ) - shift) % len(
_lowerCAmelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
A : Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
A : List[str] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
A : Dict = decrypted_with_shift.lower().count(_lowerCAmelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
A : List[str] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
A : int = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
A : Union[str, Any] = decrypted_with_shift.count(_lowerCAmelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
A : List[Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
A : Union[str, Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
A : List[Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
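

# Illustrative call (added; the outer function's name is obfuscated above —
# in the original algorithm collection it is decrypt_caesar_with_chi_squared):
#
# shift, chi_squared_value, plaintext = decrypt_caesar_with_chi_squared("crybd cdbsxq")
# The tuple holds the most likely shift, its chi-squared score, and the
# decoded message.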
import os
from typing import Dict, List, Tuple, TypeVar, Union
SCREAMING_SNAKE_CASE :Optional[Any] = TypeVar('T')
SCREAMING_SNAKE_CASE :Dict = Union[List[T], Tuple[T, ...]]
SCREAMING_SNAKE_CASE :Optional[Any] = Union[T, List[T], Dict[str, T]]
SCREAMING_SNAKE_CASE :Any = Union[str, bytes, os.PathLike]
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = CodeGenTokenizer
UpperCAmelCase__ = CodeGenTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = {'''add_prefix_space''': True}
UpperCAmelCase__ = False
def snake_case__ ( self : str ) ->str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Optional[int] = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
_UpperCamelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : Union[str, Any] = {"unk_token": "<unk>"}
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase__ ) )
def snake_case__ ( self : Union[str, Any] , **lowercase__ : int ) ->str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def snake_case__ ( self : int , **lowercase__ : List[Any] ) ->Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def snake_case__ ( self : str , lowercase__ : Dict ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : int = "lower newer"
_UpperCamelCase : int = "lower newer"
return input_text, output_text
def snake_case__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Optional[int] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Dict = "lower newer"
_UpperCamelCase : Any = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : str = tokenizer.tokenize(lowercase__ , add_prefix_space=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def snake_case__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Union[str, Any] = self.get_tokenizer()
_UpperCamelCase : Tuple = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
_UpperCamelCase : Any = "lower newer"
# Testing tokenization
_UpperCamelCase : Optional[int] = tokenizer.tokenize(lowercase__ , add_prefix_space=lowercase__ )
_UpperCamelCase : Optional[Any] = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
_UpperCamelCase : Dict = rust_tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids with special tokens
_UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=lowercase__ )
_UpperCamelCase : str = tokenizer.encode(lowercase__ , add_prefix_space=lowercase__ )
_UpperCamelCase : Optional[int] = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing the unknown token
_UpperCamelCase : Optional[Any] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def snake_case__ ( self : Any , *lowercase__ : Union[str, Any] , **lowercase__ : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : List[Any] , lowercase__ : str=15 ) ->Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
# Simple input
_UpperCamelCase : str = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Tuple = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Simple input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding="max_length" , )
def snake_case__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCamelCase : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Dict = "This is a simple input"
_UpperCamelCase : Any = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : Union[str, Any] = ("This is a simple input", "This is a pair")
_UpperCamelCase : Union[str, Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Tuple = tokenizer.pad_token_id
_UpperCamelCase : List[Any] = tokenizer(lowercase__ , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Optional[Any] = tokenizer(lowercase__ , padding=lowercase__ , truncate=lowercase__ , return_tensors="np" )
_UpperCamelCase : Union[str, Any] = tokenizer(*lowercase__ , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : List[Any] = tokenizer(lowercase__ , padding=lowercase__ , truncate=lowercase__ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case__ ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = "$$$"
_UpperCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase__ , add_bos_token=lowercase__ )
_UpperCamelCase : List[Any] = "This is a simple input"
_UpperCamelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Optional[Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(lowercase__ )
_UpperCamelCase : Any = tokenizer(lowercase__ )
self.assertEqual(out_s.input_ids[0] , lowercase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : int = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowercase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def snake_case__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_UpperCamelCase : Any = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_UpperCamelCase : Optional[int] = "\nif len_a > len_b: result = a\nelse: result = b"
_UpperCamelCase : str = tokenizer.encode(lowercase__ )
_UpperCamelCase : List[Any] = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_UpperCamelCase : Optional[int] = tokenizer.decode(lowercase__ , truncate_before_pattern=lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def snake_case__ ( self : int ) ->str:
'''simple docstring'''
pass
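
# Sketch of what the @slow test above exercises (needs network access to
# download the real checkpoint): decode() with truncate_before_pattern cuts
# the decoded text at the first match of any of the given regex patterns,
# here a line-initial "#" or a run of blank lines.
tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok.encode("if x:\n    y = 1\n\n\n\n# trailing comment")
print(tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"]))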
| 435
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Optional[Any] , UpperCamelCase__ : WhisperForConditionalGeneration , UpperCamelCase__ : WhisperProcessor , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : CLIPTextModel , UpperCamelCase__ : CLIPTokenizer , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase__ : StableDiffusionSafetyChecker , UpperCamelCase__ : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=UpperCamelCase__ , speech_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase__ )
@torch.no_grad()
def __call__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int]=16_000 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 50 , UpperCamelCase__ : float = 7.5 , UpperCamelCase__ : Optional[Union[str, List[str]]] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__ : int = 1 , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase_ = self.speech_processor.feature_extractor(
UpperCamelCase__ , return_tensors="""pt""" , sampling_rate=UpperCamelCase__ ).input_features.to(self.device )
lowercase_ = self.speech_model.generate(UpperCamelCase__ , max_length=480_000 )
lowercase_ = self.speech_processor.tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , normalize=UpperCamelCase__ )[
0
]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = 1
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = len(UpperCamelCase__ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCamelCase__ )}.''' )
# get prompt text embeddings
lowercase_ = self.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowercase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase_ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase_ , lowercase_ , lowercase_ = text_embeddings.shape
lowercase_ = text_embeddings.repeat(1 , UpperCamelCase__ , 1 )
lowercase_ = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase_ = 42
if negative_prompt is None:
lowercase_ = [""""""] * batch_size
elif type(UpperCamelCase__ ) is not type(UpperCamelCase__ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__ )} !='''
F''' {type(UpperCamelCase__ )}.''' )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = [negative_prompt]
elif batch_size != len(UpperCamelCase__ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowercase_ = negative_prompt
lowercase_ = text_input_ids.shape[-1]
lowercase_ = self.tokenizer(
UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="""pt""" , )
lowercase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ = uncond_embeddings.shape[1]
lowercase_ = uncond_embeddings.repeat(1 , UpperCamelCase__ , 1 )
lowercase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase_ = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="""cpu""" , dtype=UpperCamelCase__ ).to(
self.device )
else:
lowercase_ = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase_ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase_ = {}
if accepts_eta:
lowercase_ = eta
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
lowercase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase_ , lowercase_ = noise_pred.chunk(2 )
lowercase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase_ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = 1 / 0.18_215 * latents
lowercase_ = self.vae.decode(UpperCamelCase__ ).sample
lowercase_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__ )
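
# Hedged wiring sketch for the pipeline class above. The checkpoint names and
# the community-pipeline id are assumptions, and `raw_audio` stands in for a
# 1-D float waveform sampled at 16 kHz, so the lines stay commented out:
#
# speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
# speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",  # assumed pipeline id
#     speech_model=speech_model,
#     speech_processor=speech_processor,
#     torch_dtype=torch.float16,
# ).to("cuda")
# image = pipe(raw_audio, sampling_rate=16_000).images[0]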
| 650
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
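
# Condensed sketch of the DiffEdit workflow the slow tests above exercise:
# (1) infer an edit mask from source/target prompts, (2) invert the image to
# latents, (3) inpaint inside the mask. `pipe` and `init_image` follow the
# setup in the tests, so the lines stay commented out:
#
# mask_image = pipe.generate_mask(image=init_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# image_latents = pipe.invert(prompt="a bowl of fruit", image=init_image).latents
# edited = pipe(prompt="a bowl of pears", mask_image=mask_image, image_latents=image_latents).images[0]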
| 650
| 1
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@slow
@require_torch
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
UpperCamelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
UpperCamelCase_ = bertabert.config.encoder.vocab_size
UpperCamelCase_ = tokenizer.sep_token_id
UpperCamelCase_ = tokenizer.cls_token_id
UpperCamelCase_ = 128
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
UpperCamelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
UpperCamelCase_ = train_dataset.select(range(32 ) )
UpperCamelCase_ = val_dataset.select(range(16 ) )
UpperCamelCase_ = 4
        def _map_to_encoder_decoder_inputs(batch):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=512 )
UpperCamelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_UpperCAmelCase , max_length=128 )
UpperCamelCase_ = inputs.input_ids
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = outputs.input_ids
UpperCamelCase_ = outputs.input_ids.copy()
UpperCamelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
UpperCamelCase_ = outputs.attention_mask
assert all(len(_UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(_UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
        def _compute_metrics(pred):
UpperCamelCase_ = pred.label_ids
UpperCamelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
UpperCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_UpperCAmelCase ) )] ) / len(_UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
UpperCamelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_UpperCAmelCase , batch_size=_UpperCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = SeqaSeqTrainingArguments(
output_dir=_UpperCAmelCase , per_device_train_batch_size=_UpperCAmelCase , per_device_eval_batch_size=_UpperCAmelCase , predict_with_generate=_UpperCAmelCase , evaluation_strategy='steps' , do_train=_UpperCAmelCase , do_eval=_UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase_ = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
# start training
trainer.train()
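
# Why the mapping above replaces pad-token labels with -100: PyTorch's cross
# entropy ignores targets equal to ignore_index (default -100), so padded
# positions contribute nothing to the loss. Toy illustration:
import torch
import torch.nn.functional as F

logits = torch.randn(1, 4, 10)               # (batch, seq_len, vocab)
labels = torch.tensor([[3, 7, -100, -100]])  # last two positions are padding
print(F.cross_entropy(logits.view(-1, 10), labels.view(-1)))  # loss over the 2 real tokens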
| 23
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        '''simple docstring'''
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """simple docstring"""

    def estimator(n: int, c: int, k: int) -> float:
        # 1 - comb(n - c, k) / comb(n, k), computed stably
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
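
# Worked example for estimate_pass_at_k above: three problems with five
# samples each and 0, 1 and 5 correct completions respectively.
print(estimate_pass_at_k(np.array([5, 5, 5]), np.array([0, 1, 5]), 1))  # [0.  0.2 1. ]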
| 246
| 0
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a : Optional[Any] = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS)
a : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a : List[Any] = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def lowercase__(A , A , A , A ) ->Dict:
"""simple docstring"""
lowercase__ : Optional[Any]= False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
lowercase__ : Optional[int]= True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , A , )
is not None
):
lowercase__ : Tuple= True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowercase__ : int= True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowercase__ : Optional[int]= [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
lowercase__ : List[Any]= ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
lowercase__ : Any= True
if not attribute_used:
lowercase__ : List[str]= False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowercase__ : Dict= True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowercase__ : Dict= True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowercase__ : str= True
elif attribute.endswith("_token_id" ):
lowercase__ : Optional[Any]= True
# configuration class specific cases
if not case_allowed:
lowercase__ : Optional[Any]= SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowercase__ : Dict= allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= dict(inspect.signature(config_class.__init__ ).parameters )
lowercase__ : List[str]= [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
lowercase__ : str= [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowercase__ : str= {}
if len(config_class.attribute_map ) > 0:
lowercase__ : str= {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowercase__ : int= inspect.getsourcefile(A )
lowercase__ : Any= os.path.dirname(A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowercase__ : str= [os.path.join(A , A ) for fn in os.listdir(A ) if fn.startswith("modeling_" )]
# Get the source code strings
lowercase__ : Tuple= []
for path in modeling_paths:
if os.path.isfile(A ):
with open(A ) as fp:
modeling_sources.append(fp.read() )
lowercase__ : List[Any]= []
for config_param, default_value in zip(A , A ):
# `attributes` here is all the variant names for `config_param`
lowercase__ : Dict= [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(A , A , A , A ):
unused_attributes.append(attributes[0] )
return sorted(A )
def lowercase__() ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any]= {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowercase__ : List[str]= [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda A : inspect.isclass(A )
and issubclass(A , A )
and inspect.getmodule(A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowercase__ : List[Any]= check_config_attributes_being_used(A )
if len(A ) > 0:
lowercase__ : List[Any]= unused_attributes
if len(A ) > 0:
lowercase__ : List[str]= "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(A )
if __name__ == "__main__":
check_config_attributes()
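
# Toy check of the multi-line `getattr` pattern used above ("hidden_size" is
# an arbitrary attribute name chosen for the demo).
source = 'value = getattr(\n    self.config,\n    "hidden_size",\n)'
pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
print(re.search(pattern, source) is not None)  # True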
| 85
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample, timestep=None):
        '''simple docstring'''
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None):
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(
        self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        '''simple docstring'''
        raise NotImplementedError()
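
# Standalone sketch of the sigma schedule built in set_timesteps above, using
# this scheduler's defaults (sigma_min=0.02, sigma_max=100). Iterating the
# reversed timesteps yields a geometric ramp from sigma_min**2 up to
# sigma_max**2.
_sigma_min, _sigma_max, _steps = 0.02, 100.0, 5
print([
    _sigma_max**2 * (_sigma_min**2 / _sigma_max**2) ** (i / (_steps - 1))
    for i in reversed(range(_steps))
])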
| 85
| 1
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number, and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 23
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
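# --- Illustrative only (not part of the original file): initializing and applying the
# model above; the shapes and variable names here are hypothetical. ---
#
# unet = FlaxUNet2DConditionModel(sample_size=32)
# params = unet.init_weights(jax.random.PRNGKey(0))
# sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)                # (batch, channels, height, width)
# timesteps = jnp.array([10], dtype=jnp.int32)
# encoder_hidden_states = jnp.zeros((1, 77, 1280), dtype=jnp.float32)  # e.g. text-encoder output
# out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
# assert out.sample.shape == sample.shape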
| 343
| 0
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
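# Example invocation (illustrative only; the paths are placeholders, not from the original file):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sst-2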
| 710
|
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
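# Example (not in the original file): with the words inserted above,
# autocomplete_using_trie("de") returns every completion of "de", each ending with
# the " " that marks a word boundary in the trie:
#   ('depart ', 'detergent ', 'deer ', 'deal ')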
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 449
| 0
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image from the COCO fixtures
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 15
|
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of a list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
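# Worked example (not in the original file): for nums = [1, 2, 3, 4] the average is 2.5,
# the absolute deviations are [1.5, 0.5, 0.5, 1.5], and their mean is 4.0 / 4 = 1.0, so
# average_absolute_deviation([1, 2, 3, 4]) == 1.0.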
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_input_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Force the weight and output scale factors of Q, K and V to match by taking their max."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip the amax of GELU output quantizers to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers where name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
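# --- Illustrative only (not part of the original file): a typical calibration flow using
# the helpers above; `build_qdq_model` and `calibration_dataloader` are hypothetical. ---
#
# parser = argparse.ArgumentParser()
# add_arguments(parser)
# args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])
# set_default_quantizers(args)          # must run before the model is created
# model = build_qdq_model()             # a model built with pytorch_quantization layers
# enable_calibration(model)
# for batch in calibration_dataloader:  # forward passes collect activation statistics
#     model(**batch)
# finish_calibration(model, args)
# configure_model(model, args)          # apply fuse-qkv / clip-gelu / disable flags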
| 711
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
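# --- Illustrative only (not part of the original file): minimal tokenizer usage; the
# vocab path is a placeholder. ---
#
# tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
# tokens = tokenizer._tokenize("Hello world")  # SentencePiece pieces
# ids = [tokenizer._convert_token_to_id(t) for t in tokens]
# input_ids = tokenizer.build_inputs_with_special_tokens(ids)  # [CLS] ... [SEP]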
| 624
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
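# Example invocation (illustrative only; the script name and paths are placeholders,
# not from the original file):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch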
| 87
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # The reference IoU scores below only hold for the ViT-H checkpoint, so the value
    # checks are guarded on the model name.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
lowercase_ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Name of the original SAM checkpoint to convert.',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
type=str,
help='Hub repository to download the original SAM checkpoint from.',
)
lowercase_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 562
| 0
|
import math
def _UpperCAmelCase ( a : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, even numbers and multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1, since 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible by 3
for i in range(5 , int(math.sqrt(a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
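# Finds the nth prime by counting upward and testing each candidate with the primality check above.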
def _UpperCAmelCase ( a : int = 1_0001 ):
try:
snake_case__ = int(a )
except (TypeError, ValueError):
raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
if nth <= 0:
raise ValueError("""Parameter nth must be greater than or equal to one.""" )
snake_case__ = []
snake_case__ = 2
while len(a ) < nth:
if is_prime(a ):
primes.append(a )
num += 1
return primes[len(a ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 721
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
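# The _LazyModule registered at the bottom of this file defers the torch-only imports below
# until an attribute is first accessed.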
a__ = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[List[PIL.Image.Image], np.ndarray]
SCREAMING_SNAKE_CASE__ : Optional[List[bool]]
SCREAMING_SNAKE_CASE__ : Optional[List[bool]]
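# In the upstream diffusers pipeline these three output fields are images, nsfw_detected and watermark_detected.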
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 650
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse using np.linspace()
A_ : int =np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
A_ : Tuple =[0, 2_5, 5_0]
A_ : int =[2_5, 5_0, 7_5]
A_ : List[str] =fuzz.membership.trimf(X, abca)
A_ : Any =fuzz.membership.trimf(X, abca)
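# The two triples above define triangular sets: 'young' peaking at 25 and 'middle aged' peaking at 50.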
# Compute the different operations using inbuilt functions.
A_ : Optional[Any] =np.ones(7_5)
A_ : int =np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
A_ : Optional[Any] =fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
A_ : Union[str, Any] =fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
A_ : List[Any] =fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
A_ : int =fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
A_ : Optional[Any] =young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
A_ : List[Any] =young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
A_ : Union[str, Any] =fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
A_ : Optional[Any] =fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min and max-product composition are not demonstrated here
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 650
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ):
debug_launcher(test_script.main )
def snake_case__ ( self ):
debug_launcher(test_ops.main )
| 715
|
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
__lowercase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
__lowercase = 6
__lowercase = 1
__lowercase = 1_901
__lowercase = 0
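# 1 Jan 1901 was a Tuesday, so 6 Jan 1901 is the first Sunday; advancing in steps of 7 keeps the day counter on a Sunday.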
while year < 2_001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
__lowercase = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
__lowercase = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
__lowercase = day - days_per_month[month - 2]
if month > 12:
year += 1
__lowercase = 1
if year < 2_001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 576
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE (metaclass=lowerCamelCase_ ):
"""simple docstring"""
__a =["""keras_nlp"""]
def __init__( self : List[Any] , *__a : Tuple , **__a : int ):
requires_backends(self , ["keras_nlp"] )
| 692
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a = random.Random()
def a_ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> List[str]:
"""simple docstring"""
if rng is None:
snake_case: Dict =global_rng
snake_case: Tuple =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class a_ ( unittest.TestCase ):
def __init__( self : int , a_ : Any , a_ : str=7 , a_ : Tuple=4_0_0 , a_ : List[Any]=2_0_0_0 , a_ : str=2_0_4_8 , a_ : List[str]=1_2_8 , a_ : int=1 , a_ : Tuple=5_1_2 , a_ : Dict=3_0 , a_ : Optional[int]=4_4_1_0_0 , ) -> Union[str, Any]:
snake_case: Union[str, Any] =parent
snake_case: Optional[Any] =batch_size
snake_case: Union[str, Any] =min_seq_length
snake_case: List[str] =max_seq_length
snake_case: str =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case: int =spectrogram_length
snake_case: List[str] =feature_size
snake_case: Dict =num_audio_channels
snake_case: int =hop_length
snake_case: List[Any] =chunk_length
snake_case: Optional[Any] =sampling_rate
def UpperCamelCase ( self : Any ) -> Dict:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase ( self : Tuple , a_ : Optional[Any]=False , a_ : Tuple=False ) -> Optional[int]:
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
snake_case: Union[str, Any] =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case: Union[str, Any] =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case: Any =[np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : Optional[Any] = TvltFeatureExtractor
def UpperCamelCase ( self : int ) -> int:
snake_case: Union[str, Any] =TvltFeatureExtractionTester(self )
def UpperCamelCase ( self : Optional[Any] ) -> str:
snake_case: int =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a_ , 'spectrogram_length' ) )
self.assertTrue(hasattr(a_ , 'feature_size' ) )
self.assertTrue(hasattr(a_ , 'num_audio_channels' ) )
self.assertTrue(hasattr(a_ , 'hop_length' ) )
self.assertTrue(hasattr(a_ , 'chunk_length' ) )
self.assertTrue(hasattr(a_ , 'sampling_rate' ) )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
snake_case: Tuple =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: Optional[Any] =feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
snake_case: str =self.feature_extraction_class.from_pretrained(a_ )
snake_case: Optional[int] =feat_extract_first.to_dict()
snake_case: List[str] =feat_extract_second.to_dict()
snake_case: Optional[int] =dict_first.pop('mel_filters' )
snake_case: List[Any] =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertEqual(a_ , a_ )
def UpperCamelCase ( self : str ) -> str:
snake_case: Any =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case: int =os.path.join(a_ , 'feat_extract.json' )
feat_extract_first.to_json_file(a_ )
snake_case: Union[str, Any] =self.feature_extraction_class.from_json_file(a_ )
snake_case: Optional[Any] =feat_extract_first.to_dict()
snake_case: Optional[Any] =feat_extract_second.to_dict()
snake_case: Union[str, Any] =dict_first.pop('mel_filters' )
snake_case: Union[str, Any] =dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertEqual(a_ , a_ )
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize feature_extractor
snake_case: Tuple =self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case: Any =[floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case: Optional[Any] =[np.asarray(a_ ) for speech_input in speech_inputs]
# Test not batched input
snake_case: List[Any] =feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case: Any =feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case: Optional[Any] =feature_extractor(
a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 , mask_audio=a_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case: List[Any] =[floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case: List[str] =np.asarray(a_ )
snake_case: str =feature_extractor(a_ , return_tensors='np' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase ( self : Any , a_ : str ) -> Union[str, Any]:
snake_case: Dict =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
snake_case: Optional[int] =ds.sort('id' ).select(range(a_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
snake_case: Tuple =self._load_datasamples(1 )
snake_case: Any =TvltFeatureExtractor()
snake_case: Any =feature_extractor(a_ , return_tensors='pt' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
snake_case: int =torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a_ , atol=1E-4 ) )
| 350
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 507
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowercase_ ( __UpperCAmelCase ) -> Tuple:
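# Each rank builds the tensor [rank*n + 1, ..., rank*n + n], so gathering across n ranks yields 1..n**2.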
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowercase_ ( __UpperCAmelCase ) -> Tuple:
lowerCAmelCase__ : Optional[int] = create_tensor(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = gather(__UpperCAmelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowercase_ ( __UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : Any = [state.process_index]
lowerCAmelCase__ : Dict = gather_object(__UpperCAmelCase )
assert len(__UpperCAmelCase ) == state.num_processes, f"""{gathered_obj}, {len(__UpperCAmelCase )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), f"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def lowercase_ ( __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = create_tensor(__UpperCAmelCase )
lowerCAmelCase__ : Any = broadcast(__UpperCAmelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowercase_ ( __UpperCAmelCase ) -> Union[str, Any]:
# Give the main process one extra element so that pad_across_processes
# actually has something to pad on the other ranks
if state.is_main_process:
lowerCAmelCase__ : int = torch.arange(state.num_processes + 1 ).to(state.device )
else:
lowerCAmelCase__ : Optional[Any] = torch.arange(state.num_processes ).to(state.device )
lowerCAmelCase__ : Any = pad_across_processes(__UpperCAmelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowercase_ ( __UpperCAmelCase ) -> Optional[Any]:
# For now runs on only two processes
if state.num_processes != 2:
return
lowerCAmelCase__ : Union[str, Any] = create_tensor(__UpperCAmelCase )
lowerCAmelCase__ : Any = reduce(__UpperCAmelCase , """sum""" )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ), f"""{reduced_tensor} != {truth_tensor}"""
def lowercase_ ( __UpperCAmelCase ) -> List[str]:
# For now runs on only two processes
if state.num_processes != 2:
return
lowerCAmelCase__ : List[str] = create_tensor(__UpperCAmelCase )
lowerCAmelCase__ : Any = reduce(__UpperCAmelCase , """mean""" )
lowerCAmelCase__ : str = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase ), f"""{reduced_tensor} != {truth_tensor}"""
def lowercase_ ( __UpperCAmelCase ) -> Dict:
# For xla_spawn (TPUs)
main()
def lowercase_ ( ) -> Optional[int]:
lowerCAmelCase__ : str = PartialState()
state.print(f"""State: {state}""" )
state.print("""testing gather""" )
test_gather(__UpperCAmelCase )
state.print("""testing gather_object""" )
test_gather_object(__UpperCAmelCase )
state.print("""testing broadcast""" )
test_broadcast(__UpperCAmelCase )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(__UpperCAmelCase )
state.print("""testing reduce_sum""" )
test_reduce_sum(__UpperCAmelCase )
state.print("""testing reduce_mean""" )
test_reduce_mean(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 507
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A ( __UpperCAmelCase ):
__snake_case = 'trocr'
__snake_case = ['past_key_values']
__snake_case = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self, UpperCamelCase__=5_0265, UpperCamelCase__=1024, UpperCamelCase__=12, UpperCamelCase__=16, UpperCamelCase__=4096, UpperCamelCase__="gelu", UpperCamelCase__=512, UpperCamelCase__=0.1, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=2, UpperCamelCase__=0.02, UpperCamelCase__=0.0, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=2, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = d_model
lowerCAmelCase_ = decoder_layers
lowerCAmelCase_ = decoder_attention_heads
lowerCAmelCase_ = decoder_ffn_dim
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = init_std
lowerCAmelCase_ = decoder_layerdrop
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = scale_embedding
lowerCAmelCase_ = use_learned_position_embeddings
lowerCAmelCase_ = layernorm_embedding
super().__init__(
pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, decoder_start_token_id=UpperCamelCase__, **UpperCamelCase__, )
| 431
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class A :
__snake_case = MBartConfig
__snake_case = {}
__snake_case = 'gelu'
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=7, UpperCamelCase__=True, UpperCamelCase__=False, UpperCamelCase__=99, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=20, UpperCamelCase__=2, UpperCamelCase__=1, UpperCamelCase__=0, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor], axis=1 )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
lowerCAmelCase_ = prepare_mbart_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = TFMBartModel(config=UpperCamelCase__ ).get_decoder()
lowerCAmelCase_ = inputs_dict['''input_ids''']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase_ = inputs_dict['''head_mask''']
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
lowerCAmelCase_ = past_key_values[1]
def __UpperCamelCase ( _A , _A , _A , _A=None , _A=None , _A=None , _A=None , _A=None , ):
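# Fill in default attention and head masks for any inputs the caller did not provide.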
if attention_mask is None:
lowerCAmelCase_ = tf.cast(tf.math.not_equal(_A , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCAmelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__snake_case = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__snake_case = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__snake_case = True
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = TFMBartModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class A ( unittest.TestCase ):
__snake_case = [
' UN Chief Says There Is No Military Solution in Syria',
]
__snake_case = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
__snake_case = 'facebook/mbart-large-en-ro'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.translate_src_text(**UpperCamelCase__ )
self.assertListEqual(self.expected_text, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.tokenizer(self.src_text, **UpperCamelCase__, return_tensors='''tf''' )
lowerCAmelCase_ = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2 )
lowerCAmelCase_ = self.tokenizer.batch_decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__ )
return generated_words
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 431
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
lowerCAmelCase__ = '''</w>'''
lowerCAmelCase__ = '''@@ '''
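# '</w>' marks the end of a word in the BPE vocabulary; '@@ ' marks a subword that continues into the next token.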
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> Dict:
'''simple docstring'''
A__ = set()
A__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A__ = char
return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'''facebook/s2t-wav2vec2-large-en-de''': 1_0_2_4}
class a__ ( __a ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase , lowercase="<s>" , lowercase="<pad>" , lowercase="</s>" , lowercase="<unk>" , lowercase=False , lowercase=None , **lowercase , ) -> List[Any]:
'''simple docstring'''
super().__init__(
unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , do_lower_case=snake_case__ , **snake_case__ , )
A__ = do_lower_case
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A__ = json.load(snake_case__ )
A__ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F'No merges file provided. {self.__class__.__name__} can only be used for decoding.' )
A__ = None
A__ = None
else:
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A__ = merges_handle.read().split("\n" )[:-1]
A__ = [tuple(merge.split()[:2] ) for merge in merges]
A__ = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A__ = {}
@property
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
return len(self.decoder )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self , lowercase ) -> Any:
'''simple docstring'''
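# Append the end-of-word marker to the final symbol before looking up cached merges.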
A__ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
A__ = get_pairs(snake_case__ )
if not pairs:
return token
while True:
A__ = min(snake_case__ , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ = bigram
A__ = []
A__ = 0
while i < len(snake_case__ ):
try:
A__ = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ = tuple(snake_case__ )
A__ = new_word
if len(snake_case__ ) == 1:
break
else:
A__ = get_pairs(snake_case__ )
A__ = " ".join(snake_case__ )
if word == "\n " + BPE_TOKEN_MERGES:
A__ = "\n" + BPE_TOKEN_MERGES
if word.endswith(snake_case__ ):
A__ = word.replace(snake_case__ , "" )
A__ = word.replace(" " , snake_case__ )
A__ = word
return word
def UpperCamelCase ( self , lowercase ) -> Optional[int]:
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
A__ = text.lower()
A__ = text.split()
A__ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def UpperCamelCase ( self , lowercase ) -> Optional[Any]:
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase ( self , lowercase ) -> Dict:
'''simple docstring'''
A__ = self.decoder.get(snake_case__ , self.unk_token )
return result
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
A__ = " ".join(snake_case__ )
# make sure @@ tokens are concatenated
A__ = "".join(string.split(snake_case__ ) )
return string
def UpperCamelCase ( self , lowercase , lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A__ = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A__ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
A__ = token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return (vocab_file, merges_file)
| 706
|
import math
lowerCAmelCase__ = 1_0
lowerCAmelCase__ = 7
lowerCAmelCase__ = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int = 2_0 ) -> str:
'''simple docstring'''
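# Expected number of distinct colours = NUM_COLOURS * P(a given colour appears) = 7 * (1 - C(60, 20) / C(70, 20)).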
A__ = math.comb(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , SCREAMING_SNAKE_CASE_ )
A__ = NUM_COLOURS * (1 - missing_colour / total)
return F'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
| 626
| 0
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1_6000 ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : int = int(round(sample_rate * max_length ) )
if len(UpperCamelCase ) <= sample_length:
return wav
__UpperCAmelCase : int = randint(0 , len(UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class a__ :
lowercase_ = field(default=__magic_name__ , metadata={"help": "Name of a dataset from the datasets package"} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "A file containing the training audio paths and labels."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "A file containing the validation audio paths and labels."} )
lowercase_ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowercase_ = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowercase_ = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
lowercase_ = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
lowercase_ = field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowercase_ = field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowercase_ = field(
default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class a__ :
lowercase_ = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
lowercase_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Name or path of preprocessor config."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
lowercase_ = field(
default=__magic_name__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def a_ ( self : Optional[int]):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , UpperCamelCase_ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`.")
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , UpperCamelCase , UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCAmelCase : List[str] = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase )
transformers.utils.logging.set_verbosity(UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
__UpperCAmelCase : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
__UpperCAmelCase : Optional[int] = DatasetDict()
__UpperCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__UpperCAmelCase : List[str] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
__UpperCAmelCase : Tuple = feature_extractor.model_input_names[0]
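# Training randomly crops each clip to max_length_seconds before feature extraction; validation keeps full clips.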
def train_transforms(UpperCamelCase ):
__UpperCAmelCase : Optional[int] = []
for audio in batch[data_args.audio_column_name]:
__UpperCAmelCase : int = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCamelCase )
__UpperCAmelCase : Any = feature_extractor(UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
__UpperCAmelCase : Any = {model_input_name: inputs.get(UpperCamelCase )}
__UpperCAmelCase : int = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCamelCase ):
__UpperCAmelCase : Any = [audio["array"] for audio in batch[data_args.audio_column_name]]
__UpperCAmelCase : List[Any] = feature_extractor(UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
__UpperCAmelCase : Union[str, Any] = {model_input_name: inputs.get(UpperCamelCase )}
__UpperCAmelCase : List[Any] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__UpperCAmelCase : Tuple = raw_datasets["train"].features[data_args.label_column_name].names
__UpperCAmelCase , __UpperCAmelCase : List[Any] = {}, {}
for i, label in enumerate(UpperCamelCase ):
__UpperCAmelCase : List[str] = str(UpperCamelCase )
__UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
__UpperCAmelCase : Union[str, Any] = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase ):
__UpperCAmelCase : str = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCamelCase , references=eval_pred.label_ids )
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel=UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase : Tuple = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__UpperCAmelCase : Any = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCamelCase , output_all_columns=UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__UpperCAmelCase : Union[str, Any] = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCamelCase , output_all_columns=UpperCamelCase )
# Initialize our trainer
__UpperCAmelCase : Optional[int] = Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=UpperCamelCase , tokenizer=UpperCamelCase , )
# Training
if training_args.do_train:
__UpperCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
__UpperCAmelCase : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCAmelCase : Tuple = last_checkpoint
__UpperCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__UpperCAmelCase : Union[str, Any] = trainer.evaluate()
trainer.log_metrics("eval" , UpperCamelCase )
trainer.save_metrics("eval" , UpperCamelCase )
# Write model card and (optionally) push to hub
__UpperCAmelCase : Any = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase )
else:
trainer.create_model_card(**UpperCamelCase )
if __name__ == "__main__":
main()
| 77
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
A = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 77
| 1
|
def lowerCamelCase_ ( UpperCamelCase__ : int ):
'''simple docstring'''
UpperCamelCase__ = [1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0, 0, 0
UpperCamelCase__ = ugly_nums[ia] * 2
UpperCamelCase__ = ugly_nums[ia] * 3
UpperCamelCase__ = ugly_nums[ia] * 5
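# Three-pointer merge: each index tracks the next ugly number still to be multiplied by 2, 3 and 5 respectively.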
for _ in range(1, UpperCamelCase__ ):
UpperCamelCase__ = min(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
ugly_nums.append(UpperCamelCase__ )
if next_num == next_a:
ia += 1
UpperCamelCase__ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCamelCase__ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCamelCase__ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(2_0_0) = }')
| 713
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : Any ):
'''simple docstring'''
UpperCamelCase__ = tmp_path / '''file.csv'''
UpperCamelCase__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(UpperCamelCase__, '''w''' ) as f:
f.write(UpperCamelCase__ )
return str(UpperCamelCase__ )
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : int ):
'''simple docstring'''
UpperCamelCase__ = tmp_path / '''malformed_file.csv'''
UpperCamelCase__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(UpperCamelCase__, '''w''' ) as f:
f.write(UpperCamelCase__ )
return str(UpperCamelCase__ )
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = tmp_path / '''csv_with_image.csv'''
UpperCamelCase__ = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(UpperCamelCase__, '''w''' ) as f:
f.write(UpperCamelCase__ )
return str(UpperCamelCase__ )
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ):
'''simple docstring'''
UpperCamelCase__ = tmp_path / '''csv_with_label.csv'''
UpperCamelCase__ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(UpperCamelCase__, '''w''' ) as f:
f.write(UpperCamelCase__ )
return str(UpperCamelCase__ )
@pytest.fixture
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = tmp_path / '''csv_with_int_list.csv'''
UpperCamelCase__ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(UpperCamelCase__, '''w''' ) as f:
f.write(UpperCamelCase__ )
return str(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : int, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Tuple ):
'''simple docstring'''
UpperCamelCase__ = Csv()
UpperCamelCase__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(ValueError, match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(UpperCamelCase__ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ):
'''simple docstring'''
with open(UpperCamelCase__, encoding='''utf-8''' ) as f:
UpperCamelCase__ = f.read().splitlines()[1]
UpperCamelCase__ = Csv(encoding='''utf-8''', features=Features({'''image''': Image()} ) )
UpperCamelCase__ = csv._generate_tables([[csv_file_with_image]] )
UpperCamelCase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
UpperCamelCase__ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ):
'''simple docstring'''
with open(UpperCamelCase__, encoding='''utf-8''' ) as f:
UpperCamelCase__ = f.read().splitlines()[1:]
UpperCamelCase__ = Csv(encoding='''utf-8''', features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
UpperCamelCase__ = csv._generate_tables([[csv_file_with_label]] )
UpperCamelCase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
UpperCamelCase__ = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(UpperCamelCase__ ) for label in labels]
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = Csv(encoding='''utf-8''', sep=''',''', converters={'''int_list''': lambda UpperCamelCase__ : [int(UpperCamelCase__ ) for i in x.split()]} )
UpperCamelCase__ = csv._generate_tables([[csv_file_with_int_list]] )
UpperCamelCase__ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
UpperCamelCase__ = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
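# To run just these tests locally (assuming the upstream `datasets` test
# layout, where this file would live under tests/packaged_modules/):
#   pytest tests/packaged_modules/test_csv.py -q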
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
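# Usage sketch (hypothetical values): a config with "linear" RoPE scaling,
# which the validation above accepts.
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})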
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
if n == 1 or not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return 0
elif n == 2:
return 1
else:
__lowerCAmelCase: Tuple = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def a__ ( __SCREAMING_SNAKE_CASE ) -> int:
__lowerCAmelCase: str = 0
__lowerCAmelCase: Any = 2
while digits < n:
index += 1
__lowerCAmelCase: Optional[int] = len(str(fibonacci(__SCREAMING_SNAKE_CASE ) ) )
return index
def a__ ( __SCREAMING_SNAKE_CASE = 1_0_0_0 ) -> int:
return fibonacci_digits_index(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
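# Sanity check: fibonacci(12) == 144 is the first three-digit Fibonacci
# number, so solution(3) should return 12.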
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = {}
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(UpperCamelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase_ = [[w, v]]
if not self.graph.get(UpperCamelCase__ ):
lowerCamelCase_ = []
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return list(self.graph )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ) -> Optional[int]:
'''simple docstring'''
if s == d:
return []
lowerCamelCase_ = []
lowerCamelCase_ = []
if s == -2:
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def _lowerCAmelCase ( self , UpperCamelCase__=-1 ) -> Optional[int]:
'''simple docstring'''
if c == -1:
lowerCamelCase_ = floor(random() * 10_000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def _lowerCAmelCase ( self , UpperCamelCase__=-2 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = deque()
lowerCamelCase_ = []
if s == -2:
lowerCamelCase_ = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
lowerCamelCase_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _lowerCAmelCase ( self , UpperCamelCase__=-2 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
if s == -2:
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = s
lowerCamelCase_ = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return sorted_nodes
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = -2
lowerCamelCase_ = []
lowerCamelCase_ = s
lowerCamelCase_ = False
lowerCamelCase_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCamelCase_ = s
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = -2
lowerCamelCase_ = []
lowerCamelCase_ = s
lowerCamelCase_ = False
lowerCamelCase_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCamelCase_ = s
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def _lowerCAmelCase ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ) -> int:
'''simple docstring'''
lowerCamelCase_ = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = time()
return end - begin
def _lowerCAmelCase ( self , UpperCamelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = time()
self.bfs(UpperCamelCase__ )
lowerCamelCase_ = time()
return end - begin
class lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = {}
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 ) -> List[Any]:
'''simple docstring'''
if self.graph.get(UpperCamelCase__ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase_ = [[w, v]]
# add the other way
if self.graph.get(UpperCamelCase__ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowerCamelCase_ = [[w, u]]
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__ )
# the other way round
if self.graph.get(UpperCamelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
lowerCamelCase_ = []
lowerCamelCase_ = []
if s == -2:
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(UpperCamelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return visited
def _lowerCAmelCase ( self , UpperCamelCase__=-1 ) -> Any:
'''simple docstring'''
if c == -1:
lowerCamelCase_ = floor(random() * 10_000 ) + 10
for i in range(UpperCamelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ = floor(random() * c ) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1 )
def _lowerCAmelCase ( self , UpperCamelCase__=-2 ) -> int:
'''simple docstring'''
lowerCamelCase_ = deque()
lowerCamelCase_ = []
if s == -2:
lowerCamelCase_ = list(self.graph )[0]
d.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
while d:
lowerCamelCase_ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return len(self.graph[u] )
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = -2
lowerCamelCase_ = []
lowerCamelCase_ = s
lowerCamelCase_ = False
lowerCamelCase_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ = len(UpperCamelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCamelCase_ = s
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return list(UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = []
lowerCamelCase_ = list(self.graph )[0]
stack.append(UpperCamelCase__ )
visited.append(UpperCamelCase__ )
lowerCamelCase_ = -2
lowerCamelCase_ = []
lowerCamelCase_ = s
lowerCamelCase_ = False
lowerCamelCase_ = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ = len(UpperCamelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ = True
if len(UpperCamelCase__ ) != 0:
lowerCamelCase_ = stack[len(UpperCamelCase__ ) - 1]
else:
lowerCamelCase_ = False
indirect_parents.append(UpperCamelCase__ )
lowerCamelCase_ = s
lowerCamelCase_ = ss
# check if se have reached the starting point
if len(UpperCamelCase__ ) == 0:
return False
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return list(self.graph )
def _lowerCAmelCase ( self , UpperCamelCase__=-2 , UpperCamelCase__=-1 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = time()
return end - begin
def _lowerCAmelCase ( self , UpperCamelCase__=-2 ) -> Any:
'''simple docstring'''
lowerCamelCase_ = time()
self.bfs(UpperCamelCase__ )
lowerCamelCase_ = time()
return end - begin
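# Minimal usage sketch for the classes above:
#
#   g = DirectedGraph()
#   g.add_pair(0, 1)
#   g.add_pair(1, 2)
#   g.add_pair(2, 0)
#   g.dfs(0)         # [0, 1, 2]
#   g.has_cycle()    # True: 0 -> 1 -> 2 -> 0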
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Tuple = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
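# Note on intent: with this pattern, importing the package itself stays cheap;
# the torch-backed symbols listed in `_import_structure` are only imported the
# first time they are accessed through the `_LazyModule` proxy.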
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of `array` summing to `target`,
    using naive recursion."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized over the remaining target."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
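# Expected output for the example above: 9. The ordered sequences of 1, 2
# and 5 summing to 5 are (1,1,1,1,1), the 4 orderings of {1,1,1,2}, the 3
# orderings of {1,2,2}, and (5).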
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
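# The lists above are hand-picked, strictly decreasing diffusion timestep
# schedules (27-, 40-, 50-, 100- and 185-step variants). The `fast`/`smart`/
# `super` names used here are an assumption, inferred from each list's length.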
"""simple docstring"""
from __future__ import annotations
class snake_case_:
def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ):
lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self : Dict ):
# searches pattern in text and returns index positions
lowerCAmelCase : Union[str, Any] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ )
if mismatch_index == -1:
positions.append(UpperCamelCase_ )
else:
lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
lowerCAmelCase : int = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
snake_case__ : str = '''ABAABA'''
snake_case__ : List[str] = '''AB'''
snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern)
snake_case__ : Optional[Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
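# For text "ABAABA" and pattern "AB" this prints [0, 3], the two offsets at
# which the pattern occurs.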
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : int = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ):
if attention_mask is None:
lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class snake_case_:
def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ):
lowerCAmelCase : Tuple = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = use_labels
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : Any = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Dict = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Union[str, Any] = eos_token_id
lowerCAmelCase : Dict = pad_token_id
lowerCAmelCase : Optional[Any] = bos_token_id
lowerCAmelCase : List[str] = initializer_range
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : str ):
lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ):
lowerCAmelCase : int = 2_0
lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
lowerCAmelCase : Optional[int] = 2_0
lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class snake_case_( unittest.TestCase ):
__UpperCamelCase = 99
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : List[Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase : List[Any] = input_ids.shape[0]
lowerCAmelCase : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data()
lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ )
lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ )
lowerCAmelCase : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 )
lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class snake_case_( a__ , unittest.TestCase , a__ ):
__UpperCamelCase = True
__UpperCamelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Any = FlaxBlenderbotModelTester(self )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : List[Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
lowerCAmelCase : List[Any] = ['''Sam''']
lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' )
lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.'''
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ )
assert generated_txt[0].strip() == tgt_text
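# These tests are meant to be collected by the transformers test suite, e.g.:
#   pytest tests/models/blenderbot/test_modeling_flax_blenderbot.py -q
# (path assumes the upstream repository layout; `flax` must be installed for
# the @require_flax cases to run rather than skip).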
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
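# Sketch of the FaissIndex API exercised above (assumes faiss is installed):
#
#   index = FaissIndex(string_factory="Flat")
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, indices = index.search(np.ones(5, dtype=np.float32))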
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
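# Example invocation (all paths are placeholders, not real checkpoints):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin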
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config(self):
self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_training_gradient_checkpointing(self):  # NOTE: method name assumed; the original name was lost in extraction
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):  # name inferred from the skip reason above
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 100
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 100
| 1
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -((d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -((d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -((d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -((d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (d(matrix[0][1]) * d(matrix[1][0]))
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
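# Worked example (editor addition): [[2, 5], [1, 3]] has determinant
# 2 * 3 - 1 * 5 = 1, so its inverse is [[3, -5], [-1, 2]].
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))  # [[3.0, -5.0], [-1.0, 2.0]]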
| 675
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
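# Quick sanity check (editor addition): with the default scale_factor=8 the
# effective divisor is 8**2 = 64, and any remainder rounds the size up.
#   downscale_height_and_width(768, 768) -> (96, 96)    (768 = 12 * 64 exactly)
#   downscale_height_and_width(770, 770) -> (104, 104)  (770 // 64 = 12 with remainder, rounds up to 13)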
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
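# Classifier-free guidance in isolation (editor sketch with toy tensors): the
# denoiser runs on a [negative, positive] batch and the two predictions are
# blended as `uncond + guidance_scale * (text - uncond)`.
#   import torch
#   uncond, text = torch.zeros(2, 4), torch.ones(2, 4)
#   guided = uncond + 4.0 * (text - uncond)  # == 4.0 everywhere for this toy input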
| 675
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 214
|
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 214
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase_ = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
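# Mask construction at a glance (editor addition; pad_token_id assumed to be 0):
#   np.not_equal([[5, 4, 0]], 0).astype(np.int8) -> [[1, 1, 0]]
# i.e. padding positions are zeroed out of the attention mask.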
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 390
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
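# Example (editor addition): patience_sort([6, 3, 8, 1, 2]) returns [1, 2, 3, 6, 8].
# Each Stack holds a decreasing run; `merge` recombines the reversed (ascending)
# runs with a heap, so merging k stacks costs O(n log k).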
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 390
| 1
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
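# Renaming example (editor addition, hypothetical key): with the prefixes above,
# a checkpoint key "bert.bert.encoder.layer.0.output.dense.weight" becomes
# "visual_bert.encoder.layer.0.output.dense.weight".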
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
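# Example invocation (editor addition; the script name and paths are placeholders):
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visual_bert_vqa_pretrained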
| 83
|
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
results = {}
for iter in range(max_iters):
    results = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83
| 1
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
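# Minimal usage sketch (editor addition; assumes the accelerate state has been
# initialized by constructing an `Accelerator` or `PartialState` first):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, on the main process only")  # default behaviour
#   logger.info("logged by every rank, in order", in_order=True)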
| 577
|
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
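# Worked example (editor addition): for input_string="aab" and pattern="c*a*b",
# "c*" matches the empty string, "a*" consumes "aa", and "b" matches "b",
# so match_pattern("aab", "c*a*b") evaluates to True.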
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
SCREAMING_SNAKE_CASE__ = '''aab'''
SCREAMING_SNAKE_CASE__ = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
| 577
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""MaskFormerFeatureExtractor"""]
UpperCAmelCase_ = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["modeling_maskformer_swin"] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 458
| 0
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
'''simple docstring'''
def __init__( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[int]=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: List[str]=3 , UpperCamelCase__: List[str]=True , UpperCamelCase__: Any=True , UpperCamelCase__: List[Any]=32 , UpperCamelCase__: Any=5 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=10 , UpperCamelCase__: Tuple=0.02 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: Dict=0.6 , UpperCamelCase__: int=None , ):
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : Optional[Any] = patch_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : Any = is_training
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : List[str] = mask_ratio
lowerCamelCase__ : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
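        # Worked example with the defaults above (illustrative, not original code):
        # image_size=30, patch_size=2  -> num_patches = (30 // 2) ** 2 = 225
        # mask_ratio=0.6               -> seq_length = ceil(0.4 * (225 + 1)) = 91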
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 716
|
'''simple docstring'''
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
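# Each call draws one triangle outline and then recurses three times, so a run
# at depth d traces 3**d smallest triangles and (3**(d + 1) - 1) // 2 outlines
# in total (e.g. depth 4 -> 81 smallest triangles). Illustrative note, not
# part of the original script.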
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 631
| 0
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue

    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
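# Worked example (illustrative, not original code): partition(5) reaches the
# decompositions 5 = 5 and 5 = 2 + 3, encodes each multiset of primes as a
# product, and so returns {5, 6}.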
def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'{solution() = }')
| 685
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")

# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            " this.")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685
| 1
|
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError('''the value of input must be a natural number''')
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
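# Worked example (illustrative, not original code): for number = 12 the DP
# settles on 12 = 4 + 4 + 4, so minimum_squares_to_represent_a_number(12) == 3
# (the greedy 9 + 1 + 1 + 1 would need four squares).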
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase : int =logging.get_logger(__name__)
# NOTE: the original class name was obfuscated in this dump; "LevitImageProcessor"
# is inferred from the LeViT-style (256 / 224) shortest-edge resize rule below.
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC,
        do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255,
        do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['''shortest_edge'''])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'''height''': output_size[0], '''width''': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
        return resize(
            image, size=(size_dict['''height'''], size_dict['''width''']), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_center_crop=None,
        crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None,
        image_mean=None, image_std=None, return_tensors=None,
        data_format=ChannelDimension.FIRST, **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
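# A minimal usage sketch (illustrative; `LevitImageProcessor` and the shapes
# below follow the assumptions made in this reconstruction, not the original
# obfuscated source):
#
#   processor = LevitImageProcessor(size={"shortest_edge": 224})
#   batch = processor(images=[pil_image], return_tensors="np")
#   # resize to shortest edge int(256 / 224 * 224) = 256, then a 224x224 center crop:
#   batch["pixel_values"][0].shape  # -> (3, 224, 224)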
| 332
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64,
        intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000,
        attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1,
        max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5,
        use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False,
        use_parallel_residual=True, rope_scaling=None, **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisble by the number of attention heads! Make sure to update them!')
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 174
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='hybrid')

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        # (e.g. "neck.refinenet4" becomes "neck.fusion_stage.layers.0")
        name = name.replace(f"""refinenet{layer_idx}""", f"""fusion_stage.layers.{abs(layer_idx-4)}""")
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder')
    if ".." in name:
        name = name.replace('..', '.')
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv')
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit')
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm')
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')

    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 174
| 1
|
from math import ceil
def solution(n: int = 10_01) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
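# Worked example (illustrative, not original code): solution(5) sums the
# diagonals of a 5x5 number spiral, 1 + (3 + 5 + 7 + 9) + (13 + 17 + 21 + 25) = 101.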
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 636
|
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({'(', '[', '{'})
    closed_brackets = set({')', ']', '}'})
    open_to_closed = {'{': '}', '[': ']', '(': ')'}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
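# For example (illustrative, not original code): is_balanced("([]{})") is True,
# while is_balanced("([)]") is False because ")" tries to close "[" out of order.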
def main():
    s = input('Enter sequence of brackets: ')
    if is_balanced(s):
        print(s, 'is balanced')
    else:
        print(s, 'is not balanced')
if __name__ == "__main__":
main()
| 636
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ['MobileViTFeatureExtractor']
    _import_structure["image_processing_mobilevit"] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43
| 1
|
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
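# For example (illustrative, not original code): find_max([3, 1, 4, 1, 5], 0, 4)
# returns 5 by comparing the maxima of the halves [3, 1, 4] and [1, 5].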
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 427
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
        use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
            is_folding_model=True, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False}, )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip('Does not support attention outputs')
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('ESMFold does not support passing input embeds!')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_integration(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_headmasking(self):
        pass

    @unittest.skip('ESMFold does not output hidden states in the normal way.')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('ESMfold does not output hidden states in the normal way.')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('ESMFold only has one output format.')
    def test_model_main_input_name(self):
        pass

    @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip('ESMFold does not support input chunking.')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
    def test_initialization(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_simple(self):
        pass

    @unittest.skip('ESMFold doesn\'t support data parallel.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1E-4))
| 427
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 323
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f'''{i}-peak'''] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(F'''{description}:''')
    print(F'''- Time: {measures["time"]:.2f}s''')
    for i in range(torch.cuda.device_count()):
        print(F'''- GPU {i} allocated: {measures[str(i)]:.2f}MiB''')
        peak = measures[F'''{i}-peak''']
        print(F'''- GPU {i} peak: {peak:.2f}MiB''')
    print(F'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''')
    print(F'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''')
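# A minimal usage sketch (illustrative, not part of the original module):
#
#   start = start_measure()
#   ...  # run the workload being profiled
#   measures = end_measure(start)
#   log_measures(measures, "my workload")
#
# All memory figures are reported in MiB (hence the divisions by 2**20 above).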
| 143
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 435
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
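# Usage sketch: with `_LazyModule` installed in `sys.modules`, heavy
# framework-specific submodules are only imported on first attribute access,
# e.g. `from transformers import RobertaPreLayerNormModel` (requires torch).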
| 435
| 1
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path( test_file ):
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F"""{test_file} instead.""" )
    test_fn = components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    components = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path = '''.'''.join(components )
    return test_module_path


def get_test_module( test_file ):
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module


def get_tester_classes( test_file ):
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )


def get_test_classes( test_file ):
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )


def get_model_classes( test_file ):
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )


def get_model_tester_from_test_class( test_class ):
    test = test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()
    model_tester = None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model( test_file , model_class ):
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )


def get_tester_classes_for_model( test_file , model_class ):
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )


def get_test_to_tester_mapping( test_file ):
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping( test_file ):
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping( test_file ):
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json( o ):
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
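# A minimal usage sketch (the test-file path below is illustrative): map a
# model test module to its model classes and testers, then serialize the
# result with `to_json`.
if __name__ == "__main__":
    mapping = get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
    print(to_json(mapping))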
| 661
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks( args ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args ):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ):
    '''simple docstring'''
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=snake_case_ , required=snake_case_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=snake_case_ , required=snake_case_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=snake_case_ , choices=["distilbert", "roberta", "gpt2"] , required=snake_case_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=snake_case_ , required=snake_case_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=snake_case_ , type=snake_case_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=snake_case_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=snake_case_ , required=snake_case_ , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=snake_case_ , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=snake_case_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=snake_case_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=snake_case_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=snake_case_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=snake_case_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=snake_case_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=snake_case_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=snake_case_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=snake_case_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=snake_case_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=snake_case_ , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=snake_case_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=snake_case_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=snake_case_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=snake_case_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=snake_case_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=snake_case_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=snake_case_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=snake_case_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=snake_case_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=snake_case_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=snake_case_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=snake_case_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=snake_case_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=snake_case_ , default=5_00 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=snake_case_ , default=40_00 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether"""
                    " to overwrite it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
    with open(args.data_file , "rb" ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
        with open(args.token_counts , "rb" ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
logger.info("Data loader created." )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
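# Example invocation sketch (all paths and file names are illustrative,
# following the distillation README conventions rather than this repository):
#   python train.py --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_clm 0.0 --alpha_cos 0.33 \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --dump_path serialization_dir/my_first_distillation --force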
| 78
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701
|
import math
def real_power(apparent_power , power_factor ) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * power_factor


def reactive_power(apparent_power , power_factor ) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
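    # Usage sketch: a 100 VA load at power factor 0.8 (a 3-4-5 triangle)
    # draws 80 W of real power and about 60 VAR of reactive power.
    print(real_power(100 , 0.8 ) )  # 80.0
    print(reactive_power(100 , 0.8 ) )  # ~60.0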
| 580
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig( PretrainedConfig ):
    model_type = '''xlm'''
    attribute_map = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
    def __init__( self , vocab_size=3_01_45 , emb_dim=20_48 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=5_12 , embed_init_std=20_48**-0.5 , layer_norm_eps=1e-12 , init_std=0.0_2 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
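# A minimal usage sketch (not part of the original module): generic attribute
# names resolve to XLM-specific fields through `attribute_map`, and the ONNX
# config exposes the dynamic input axes.
if __name__ == "__main__":
    config = XLMConfig(emb_dim=10_24 , n_layers=6 )
    print(config.hidden_size )  # 1024, aliased to `emb_dim` via `attribute_map`
    print(XLMOnnxConfig(config , task="default" ).inputs )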
| 643
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def get_dummy_components( self ):
        return self._get_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all( self ):
        # if
        pipe_1 = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("""cuda""" )
        prompt_embeds , negative_prompt_embeds = pipe_1.encode_prompt("""anime turtle""" , device="""cuda""" )
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components )
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components )
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
    def _test_if( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , num_inference_steps=2 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_img2img( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , num_inference_steps=2 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_inpainting( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , num_inference_steps=2 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(image , expected_image )
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 643
| 1
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCamelCase__ : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCamelCase__ : str = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
"""simple docstring"""
    def run_seq2seq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , """trainer_state.json""" ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["""eval_bleu"""] , float )
            assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist( self ):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp( self ):
        self.run_seq2seq_quick(distributed=False )

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp( self ):
        self.run_seq2seq_quick(distributed=True )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp( self ):
        self.run_seq2seq_quick(distributed=True , extra_args_str="""--sharded_ddp simple""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16( self ):
        self.run_seq2seq_quick(distributed=True , extra_args_str="""--sharded_ddp simple --fp16""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp( self ):
        self.run_seq2seq_quick(distributed=True , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=False )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16( self ):
        self.run_seq2seq_quick(
            distributed=True , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=False )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex( self ):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True , extra_args_str="""--fp16 --fp16_backend=apex""" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True , extra_args_str="""--fp16 --fp16_backend=apex""" )
    @parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
    @require_torch_multi_gpu
    def test_trainer_log_level_replica( self , experiment_id ):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            """base""": {"""extra_args_str""": """""", """n_matches""": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
        log_info_string = """Running training"""
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs , extra_args_str=data["""extra_args_str"""] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data["""n_matches"""] )
    @slow
    def test_run_seq2seq( self ):
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , """trainer_state.json""" ) ).log_history
        eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["""eval_bleu"""] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb( self ):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim ) -> Tuple[int, float]:
            extra_args = """--skip_memory_metrics 0"""
            output_dir = self.run_trainer(
                max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , """trainer_state.json""" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
            loss = logs[0]["""train_loss"""]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 1_20
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            loss_orig , loss_bnb , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
    def run_trainer( self , max_len , model_name , num_train_epochs , learning_rate = 3E-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ):
        data_dir = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs )}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps )}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = F"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps )}\n        ".split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F"\n            -m torch.distributed.run\n            --nproc_per_node={n_gpus_to_use}\n            --master_port={master_port}\n            {self.examples_dir_str}/pytorch/translation/run_translation.py\n        ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ["""run_translation.py"""] + args
            with patch.object(sys , """argv""" , testargs ):
                main()
        return output_dir
| 714
|
'''simple docstring'''
def base16_encode( data: bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )


def base16_decode( data: str ) -> bytes:
    """simple docstring"""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
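    # Round-trip sketch: encode bytes to uppercase hex, then decode them back.
    encoded = base16_encode(b"Hello" )  # '48656C6C6F'
    assert base16_decode(encoded ) == b"Hello"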
| 0
| 0
|
SCREAMING_SNAKE_CASE__ : Dict = [0, 2, 4, 6, 8]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length , remainder , digits , length ) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result


def solution(max_power = 9 ) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
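# Reference sketch (assumed helper, not part of the original solution): a
# brute-force check of what "reversible" means, useful for validating small
# cases. n is reversible when n + reverse(n) has only odd digits and n has no
# trailing zero (its reverse would otherwise gain a leading zero).
def _is_reversible_brute_force(n ):
    if n % 10 == 0:
        return False
    return all(int(d ) % 2 == 1 for d in str(n + int(str(n )[::-1] ) ) )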
if __name__ == "__main__":
print(f'''{solution() = }''')
| 79
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=6_4 , embedding_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )

    def create_and_check_megatron_bert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_megatron_bert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_megatron_bert_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_megatron_bert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def create_and_check_megatron_bert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def create_and_check_megatron_bert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_megatron_bert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_megatron_bert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_megatron_bert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    '''simple docstring'''
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 639
| 0
|
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad with zeros until the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # each 3-bit group maps to exactly one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
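# A hedged usage sketch (illustrative values, not from the original module):
# >>> bin_to_octal("1010")    # 0b1010 == 10 == 0o12
# '12'
# >>> bin_to_octal("111111")  # 0b111111 == 63 == 0o77
# '77'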
| 289
|
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # shift larger elements right until the insertion point is found
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
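# A hedged usage sketch of the entry point above; with the size threshold of
# 16, inputs this small fall straight through to insertion sort.
# >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
# [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]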
| 289
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray):
        '''simple docstring'''
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ):
        '''simple docstring'''
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
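if __name__ == "__main__":
    # A hedged, self-contained sketch of the utterance-level CMVN applied by
    # `utterance_cmvn` above; the random features and lengths are illustrative.
    demo = np.random.randn(100, 80).astype(np.float32)  # frames x mel bins
    n = 80  # true (unpadded) number of frames
    demo = demo - demo[:n].mean(axis=0)  # normalize_means: zero mean per bin
    demo = demo / demo[:n].std(axis=0)  # normalize_vars: unit variance per bin
    demo[n:] = 0.0  # padded frames are reset to the padding value
    print(demo[:n].mean(axis=0).max())  # ~0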
| 522
|
"""simple docstring"""
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect absolute URLs from every anchor tag's href attribute."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the registered domain, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the network location part of the URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the page at `url` and the pages it links to, returning the
    e-mail addresses found on them."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)
    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
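# A hedged sketch of the regex used above (sample text is made up); it only
# matches bare `name@<domain>` addresses on the crawled domain:
# >>> re.findall("[a-zA-Z0-9]+@" + "github.com", "mail octo123@github.com now")
# ['octo123@github.com']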
| 522
| 1
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = KandinskyVaaPriorPipeline
__snake_case = ['prompt']
__snake_case = ['prompt', 'negative_prompt']
__snake_case = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__snake_case = False
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return 3_2
@property
def UpperCamelCase_ ( self ) -> int:
return 3_2
@property
def UpperCamelCase_ ( self ) -> Any:
return self.time_input_dim
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return 1_0_0
@property
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCamelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__lowerCamelCase )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_SCREAMING_SNAKE_CASE : Optional[int] = PriorTransformer(**__lowerCamelCase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self ) -> List[str]:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
_SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(__lowerCamelCase )
return model
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Dict = self.dummy_prior
_SCREAMING_SNAKE_CASE : Tuple = self.dummy_image_encoder
_SCREAMING_SNAKE_CASE : Dict = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE : Dict = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE : List[Any] = self.dummy_image_processor
_SCREAMING_SNAKE_CASE : Tuple = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=__lowerCamelCase , clip_sample_range=10.0 , )
_SCREAMING_SNAKE_CASE : Dict = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=0 ) -> Optional[Any]:
if str(__lowerCamelCase ).startswith("mps" ):
_SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(__lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : List[str] = "cpu"
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : int = self.pipeline_class(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
_SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : Dict = image[0, -1_0:]
_SCREAMING_SNAKE_CASE : Optional[int] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
_SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Dict = torch_device == "cpu"
_SCREAMING_SNAKE_CASE : List[str] = True
_SCREAMING_SNAKE_CASE : int = False
self._test_inference_batch_single_identical(
test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , test_mean_pixel_difference=__lowerCamelCase , )
@skip_mps
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[str] = torch_device == "cpu"
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=__lowerCamelCase , test_mean_pixel_difference=__lowerCamelCase , )
| 381
|
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
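# A hedged CLI sketch (the script filename and paths are placeholders):
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path ./models/model.ckpt \
#         --pytorch_dump_folder_path ./gpt2-pytorch
# Leaving --gpt2_config_file unset (the default "") builds a stock GPTaConfig.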
| 381
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = 1
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : Optional[Any] = (32, 32)
__UpperCAmelCase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase )
return image
@property
def A_ ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def A_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Dict = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(__lowercase )
@property
def A_ ( self : Optional[Any] ):
'''simple docstring'''
def extract(*__lowercase : Dict , **__lowercase : Any ):
class snake_case :
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = torch.ones([0] )
def A_ ( self : List[str] , __lowercase : Optional[Any] ):
'''simple docstring'''
self.pixel_values.to(__lowercase )
return self
return Out()
return extract
def A_ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[Any] = self.dummy_cond_unet
__UpperCAmelCase : List[Any] = PNDMScheduler(skip_prk_steps=__lowercase )
__UpperCAmelCase : Optional[Any] = self.dummy_vae
__UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
__UpperCAmelCase : List[str] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
__UpperCAmelCase : List[Any] = 77
__UpperCAmelCase : Optional[int] = self.dummy_image.to(__lowercase )
__UpperCAmelCase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline(
unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
__UpperCAmelCase : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
__UpperCAmelCase : Optional[int] = alt_pipe.to(__lowercase )
alt_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger'''
__UpperCAmelCase : int = torch.Generator(device=__lowercase ).manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = alt_pipe(
[prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , )
__UpperCAmelCase : int = output.images
__UpperCAmelCase : str = torch.Generator(device=__lowercase ).manual_seed(0 )
__UpperCAmelCase : List[str] = alt_pipe(
[prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , return_dict=__lowercase , )[0]
__UpperCAmelCase : int = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : List[str] = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet
__UpperCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=__lowercase )
__UpperCAmelCase : List[Any] = self.dummy_vae
__UpperCAmelCase : int = self.dummy_text_encoder
__UpperCAmelCase : str = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
__UpperCAmelCase : List[str] = 77
__UpperCAmelCase : Optional[Any] = self.dummy_image.to(__lowercase )
# put models in fp16
__UpperCAmelCase : Optional[Any] = unet.half()
__UpperCAmelCase : Union[str, Any] = vae.half()
__UpperCAmelCase : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : List[Any] = AltDiffusionImgaImgPipeline(
unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
__UpperCAmelCase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
__UpperCAmelCase : List[str] = alt_pipe.to(__lowercase )
alt_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : str = '''A painting of a squirrel eating a burger'''
__UpperCAmelCase : List[str] = torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = alt_pipe(
[prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , image=__lowercase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCAmelCase : Dict = init_image.resize((760, 504) )
__UpperCAmelCase : List[Any] = '''BAAI/AltDiffusion'''
__UpperCAmelCase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowercase , safety_checker=__lowercase , )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase : str = '''A fantasy landscape, trending on artstation'''
__UpperCAmelCase : List[str] = torch.manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = pipe(
prompt=__lowercase , image=__lowercase , strength=0.7_5 , guidance_scale=7.5 , generator=__lowercase , output_type='''np''' , )
__UpperCAmelCase : Any = output.images[0]
__UpperCAmelCase : Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__UpperCAmelCase : Optional[int] = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__UpperCAmelCase : Any = init_image.resize((768, 512) )
__UpperCAmelCase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
__UpperCAmelCase : List[str] = '''BAAI/AltDiffusion'''
__UpperCAmelCase : str = AltDiffusionImgaImgPipeline.from_pretrained(
__lowercase , safety_checker=__lowercase , )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase : List[str] = '''A fantasy landscape, trending on artstation'''
__UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
__UpperCAmelCase : str = pipe(
prompt=__lowercase , image=__lowercase , strength=0.7_5 , guidance_scale=7.5 , generator=__lowercase , output_type='''np''' , )
__UpperCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 522
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowercase__ :List[Any] = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # the demo list built in main() has exactly 10 elements
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """simple docstring"""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
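# A hedged single-process reference implementation of odd-even transposition
# sort (not part of the original module); handy for checking the parallel
# version's output on small lists.
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr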
| 522
| 1
|
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale image around its global mean intensity."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 2_55 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
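# A hedged alternative sketch using Pillow built-ins (same binarization,
# different mechanics): ImageStat computes the mean, point() maps the pixels.
#     from PIL import ImageStat
#     img = Image.open("path_to_image").convert("L")
#     cutoff = ImageStat.Stat(img).mean[0]
#     img.point(lambda p: 255 if p > cutoff else 0).save("output_image_path")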
| 597
| 0
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=sys.maxsize ):
SCREAMING_SNAKE_CASE : str = "bilinear"
SCREAMING_SNAKE_CASE : str = max_size
SCREAMING_SNAKE_CASE : int = short_edge_length
def __call__( self : Dict , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Tuple = []
for img in imgs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = img.shape[:2]
# later: provide list and randomly choose index for resize
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
SCREAMING_SNAKE_CASE : Any = size * 1.0 / min(UpperCAmelCase_ , UpperCAmelCase_ )
if h < w:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = scale * h, size
if max(UpperCAmelCase_ , UpperCAmelCase_ ) > self.max_size:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.max_size * 1.0 / max(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = newh * scale
SCREAMING_SNAKE_CASE : List[str] = neww * scale
SCREAMING_SNAKE_CASE : List[str] = int(neww + 0.5 )
SCREAMING_SNAKE_CASE : str = int(newh + 0.5 )
if img.dtype == np.uinta:
SCREAMING_SNAKE_CASE : Optional[Any] = Image.fromarray(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : int = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
SCREAMING_SNAKE_CASE : Optional[int] = nn.functional.interpolate(
UpperCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCAmelCase_ ).squeeze(0 )
img_augs.append(UpperCAmelCase_ )
return img_augs
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
SCREAMING_SNAKE_CASE : Optional[Any] = cfg.INPUT.FORMAT
SCREAMING_SNAKE_CASE : List[str] = cfg.SIZE_DIVISIBILITY
SCREAMING_SNAKE_CASE : List[str] = cfg.PAD_VALUE
SCREAMING_SNAKE_CASE : List[str] = cfg.INPUT.MAX_SIZE_TEST
SCREAMING_SNAKE_CASE : int = cfg.MODEL.DEVICE
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
SCREAMING_SNAKE_CASE : List[Any] = lambda UpperCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def _A ( self : List[str] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Tuple = tuple(max(UpperCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
SCREAMING_SNAKE_CASE : Dict = [im.shape[-2:] for im in images]
SCREAMING_SNAKE_CASE : str = [
nn.functional.pad(
UpperCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
return torch.stack(UpperCAmelCase_ ), torch.tensor(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=False ):
with torch.no_grad():
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = [images]
if single_image:
assert len(UpperCAmelCase_ ) == 1
for i in range(len(UpperCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCAmelCase_ , images.pop(UpperCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] )
SCREAMING_SNAKE_CASE : Any = self.aug(UpperCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.normalizer(UpperCAmelCase_ ) for x in images]
# now pad them to do the following operations
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.pad(UpperCAmelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.true_divide(UpperCAmelCase_ , UpperCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    """Scale (N, 4) boxes in x0, y0, x1, y1 order by per-image (y, x) factors."""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    """Clamp box coordinates in place to the (height, width) of the image."""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
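if __name__ == "__main__":
    # A hedged numeric sketch of the two helpers above (dummy boxes, not real
    # detections); boxes are (N, 4) as x0, y0, x1, y1 and scale_yx is (N, 2).
    demo_boxes = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
    demo_scale = torch.tensor([[0.5, 2.0]])  # halve y, double x
    _scale_box(demo_boxes, demo_scale)
    _clip_box(demo_boxes, (64, 64))  # clamp to a 64 x 64 image
    print(demo_boxes)  # tensor([[20., 10., 64., 40.]])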
| 62
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
SCREAMING_SNAKE_CASE : List[Any] = [144, 192, 240]
SCREAMING_SNAKE_CASE : Tuple = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
SCREAMING_SNAKE_CASE : List[str] = [96, 120, 144]
SCREAMING_SNAKE_CASE : Dict = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
SCREAMING_SNAKE_CASE : List[str] = [64, 80, 96]
SCREAMING_SNAKE_CASE : List[str] = [16, 16, 24, 48, 64, 80, 320]
SCREAMING_SNAKE_CASE : int = 0.05
SCREAMING_SNAKE_CASE : int = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
SCREAMING_SNAKE_CASE : str = 512
SCREAMING_SNAKE_CASE : List[str] = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 21
SCREAMING_SNAKE_CASE : Dict = "pascal-voc-id2label.json"
else:
SCREAMING_SNAKE_CASE : Optional[Any] = 1000
SCREAMING_SNAKE_CASE : Optional[Any] = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE : Any = "huggingface/label-files"
SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE : List[str] = {int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( lowercase , lowercase=False ):
"""simple docstring"""
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace(".block." , "." )
if "exp_1x1" in name:
SCREAMING_SNAKE_CASE : str = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
SCREAMING_SNAKE_CASE : int = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(".norm." , ".normalization." )
if ".conv." in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
SCREAMING_SNAKE_CASE : str = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace(F'''.global_rep.{i}.weight''' , ".layernorm.weight" )
if F'''.global_rep.{i}.bias''' in name:
SCREAMING_SNAKE_CASE : str = name.replace(F'''.global_rep.{i}.bias''' , ".layernorm.bias" )
if ".global_rep." in name:
SCREAMING_SNAKE_CASE : Dict = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
SCREAMING_SNAKE_CASE : int = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
SCREAMING_SNAKE_CASE : List[Any] = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
SCREAMING_SNAKE_CASE : List[Any] = "mobilevit." + name
return name
def lowerCamelCase__ ( lowercase , lowercase , lowercase=False ):
"""simple docstring"""
if base_model:
SCREAMING_SNAKE_CASE : Optional[int] = ""
else:
SCREAMING_SNAKE_CASE : Any = "mobilevit."
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(lowercase )
if key[:8] == "encoder.":
SCREAMING_SNAKE_CASE : int = key[8:]
if "qkv" in key:
SCREAMING_SNAKE_CASE : Optional[int] = key.split("." )
SCREAMING_SNAKE_CASE : Any = int(key_split[0][6:] ) - 1
SCREAMING_SNAKE_CASE : List[Any] = int(key_split[3] )
SCREAMING_SNAKE_CASE : List[Any] = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
SCREAMING_SNAKE_CASE : int = layer.transformer.layer[transformer_num].attention.attention.all_head_size
SCREAMING_SNAKE_CASE : Union[str, Any] = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
SCREAMING_SNAKE_CASE : Optional[int] = val[:dim, :]
SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE : str = val[:dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
SCREAMING_SNAKE_CASE : List[Any] = val
return orig_state_dict
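# A hedged sketch (dummy tensor, not a real checkpoint) of the fused-QKV
# split performed above: a (3*dim, hidden) weight is cut into query / key /
# value blocks of `dim` rows each.
#     qkv = torch.arange(3 * 4 * 8, dtype=torch.float32).reshape(12, 8)
#     q, k, v = qkv[:4, :], qkv[4:8, :], qkv[-4:, :]
#     assert q.shape == k.shape == v.shape == (4, 8)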
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = get_mobilevit_config(lowercase )
# load original state_dict
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(lowercase , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
SCREAMING_SNAKE_CASE : List[str] = MobileViTForSemanticSegmentation(lowercase ).eval()
else:
SCREAMING_SNAKE_CASE : str = MobileViTForImageClassification(lowercase ).eval()
SCREAMING_SNAKE_CASE : Any = convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : str = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE : Dict = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , lowercase , atol=1E-4 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if push_to_hub:
SCREAMING_SNAKE_CASE : List[str] = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
SCREAMING_SNAKE_CASE : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase , organization="apple" )
model.push_to_hub(lowercase , organization="apple" )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 62
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit'''] = [
        '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTForImageClassification''',
        '''ViTForMaskedImageModeling''',
        '''ViTModel''',
        '''ViTPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit'''] = [
        '''TFViTForImageClassification''',
        '''TFViTModel''',
        '''TFViTPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vit'''] = [
        '''FlaxViTForImageClassification''',
        '''FlaxViTModel''',
        '''FlaxViTPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
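# Hedged illustration (not in the file above): the _LazyModule pattern keeps
# `import transformers.models.vit` cheap; framework-specific submodules are
# imported only when one of their attributes is first touched, e.g.:
#
#     import transformers.models.vit as vit
#
#     cfg = vit.ViTConfig()      # pulls in configuration_vit only
#     model_cls = vit.ViTModel   # torch-backed modeling_vit loads at this point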
| 710
|
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0)
    def test_trainer_tpu(self):
import xla_spawn
a_ ="\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(lowerCAmelCase_ , "argv" , lowerCAmelCase_):
xla_spawn.main()
| 41
| 0
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
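# Illustrative checks for the repaired functions above (not in the original):
#
#     assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
#     assert iter_merge_sort([1]) == [1]
#     assert iter_merge_sort([]) == []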
| 21
|
from collections.abc import Sequence
def max_subsequence_sum(nums=None):
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
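# Worked example (illustrative, not in the original): for a *subsequence* (elements
# need not be contiguous), the recurrence ans = max(ans, ans + num, num) ends up
# summing every positive term, falling back to the largest element when all are negative:
#
#     assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12
#     assert max_subsequence_sum([-3, -7, -1]) == -1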
| 21
| 1
|
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
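# A hedged usage sketch (not part of the class above): this pipeline is normally
# reached through transformers.pipeline with the "feature-extraction" task.
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("Transformers is great!")
#     # features[0] is a list of per-token vectors (hidden size 768 for this model)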
| 701
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 485
| 0
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
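# Illustrative behaviour of the repaired function above:
#
#     alternative_string_arrange("AB", "XYZ")   # -> "AXBYZ"
#     alternative_string_arrange("ABCD", "XY")  # -> "AXBYCD"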
| 121
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            F"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.")
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 121
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientformer"""] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_efficientformer"""] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670
|
from manim import *
class Stage1(Scene):
'''simple docstring'''
    def construct(self):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670
| 1
|
'''simple docstring'''
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
A_ : Optional[int] = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 38
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(F'\u001b[{color}m{content}\u001b[0m', end)


def reset_cursor():
    forceWrite('\r')


def move_cursor(num_lines: int, direction: str):
    forceWrite(F'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')


def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
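# A hedged usage sketch: these helpers redraw a single terminal line in place,
# e.g. a minimal self-overwriting counter:
#
#     import time
#
#     for i in range(5):
#         clear_line()
#         forceWrite(f"step {i + 1}/5")
#         time.sleep(0.1)
#     forceWrite("", end="\n")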
| 688
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[F"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[F"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
    @property
    def atol_for_validation(self) -> float:
        return 1E-4
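# Hedged sketch (not in the file above): the ONNX config is built from a config
# instance, and its `inputs` mapping names the dynamic axes the exporter should use.
#
#     config = MarianConfig()
#     onnx_config = MarianOnnxConfig(config, task="default")
#     print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask / decoder_* axes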
| 701
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(F"F1: {f1:.2f}")
    logger.info(F"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(F"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 18
| 0
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='np')
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1_000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 291
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Checks whether digit ``n`` may be placed at (row, column): it must not already
    appear in that row, that column, or the surrounding 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    Finds an empty location (marked with 0) so a digit can be assigned there.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Backtracking solver: tries each digit in the first empty cell and recurses,
    undoing the assignment when the branch fails.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """
    Prints the solution as a 9x9 grid.
    """
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
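# Illustrative checks (not in the original): is_safe() rejects a digit already
# present in the row, column, or 3x3 box.
#
#     demo = [[0] * 9 for _ in range(9)]
#     demo[0][0] = 5
#     assert not is_safe(demo, 0, 8, 5)  # same row
#     assert not is_safe(demo, 8, 0, 5)  # same column
#     assert not is_safe(demo, 1, 1, 5)  # same 3x3 box
#     assert is_safe(demo, 4, 4, 5)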
| 511
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
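
# Note: the two integration tests above are gated behind ``@slow`` and
# ``@require_torch_gpu``. Under the usual diffusers test setup they are
# skipped unless slow tests are enabled, e.g. ``RUN_SLOW=1 pytest <this file>``
# (exact invocation depends on your checkout).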
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved options disagree with the
        # arguments passed here (e.g. a checkpoint saved with different casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
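
# Minimal usage sketch (kept in comments; loading "bert-base-uncased" assumes
# access to the Hugging Face Hub or a local cache):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   encoding = tokenizer("Hello, world!")
#   print(encoding.tokens())  # ['[CLS]', 'hello', ',', 'world', '!', '[SEP]']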
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print('f(x) = sin(10 * x)')
    print('The length of the curve from x = -10 to x = 10 is:')

    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
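
    # Sanity check: the straight line y = x from 0 to 1 has exact length
    # sqrt(2), which the piecewise-linear approximation reproduces for any
    # step count.
    print(f"Straight-line check: {line_length(lambda x: x, 0, 1, 1_000)} vs {math.sqrt(2)}")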