code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""Genetic algorithm that evolves a random population of strings toward a target."""
from __future__ import annotations

import random

# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score *item* against *main_target*.

    The score is the number of characters that match position-wise
    (both strings are assumed to have the same length).

    Returns:
        A ``(item, score)`` tuple so callers can sort candidates by fitness.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails.

    Bug fix: the original built both children from ``parent_1`` only, which
    made crossover a no-op; each child must mix genes from both parents.
    """
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of *child*.

    Bug fix: the original assigned the random gene to a fresh local variable
    instead of writing it into the child's gene list, so mutation never
    actually happened.
    """
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed children of *parent_1* with random mates from *population_score*.

    The number of children is proportional to the (normalized) fitness score
    of *parent_1*, capped at 10 pairs per generation.
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # Pick a second parent among the best N_SELECTED candidates.
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append the mutated children to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one exactly matches *target*.

    Args:
        target: The string the population must converge to.
        genes: Alphabet the algorithm may use; must cover every char of *target*.
        debug: When True, print progress every 10 generations.

    Returns:
        ``(generation, total_population, best_string)`` at convergence.

    Raises:
        ValueError: If N_POPULATION <= N_SELECTED or *target* uses characters
            missing from *genes* (evolution could never converge).
    """
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution (best candidate first).
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    # Demo run: evolve random strings toward the sentence below.
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
| 334
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( lowercase__ ):
lowercase : Tuple = (DEISMultistepScheduler,)
lowercase : List[str] = (('num_inference_steps', 2_5),)
def a__ ( self :Any ,**_UpperCamelCase :str ):
snake_case_ : List[Any] = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**_UpperCamelCase )
return config
def a__ ( self :str ,_UpperCamelCase :Optional[Any]=0 ,**_UpperCamelCase :Any ):
snake_case_ : Optional[Any] = dict(self.forward_default_kwargs )
snake_case_ : Optional[Any] = kwargs.pop("""num_inference_steps""" ,_UpperCamelCase )
snake_case_ : Union[str, Any] = self.dummy_sample
snake_case_ : Tuple = 0.1 * sample
snake_case_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case_ : Optional[Any] = self.get_scheduler_config(**_UpperCamelCase )
snake_case_ : Union[str, Any] = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
snake_case_ : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
snake_case_ : Optional[int] = scheduler_class.from_pretrained(_UpperCamelCase )
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
snake_case_ : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ , snake_case_ : int = sample, sample
for t in range(_UpperCamelCase ,time_step + scheduler.config.solver_order + 1 ):
snake_case_ : str = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample
snake_case_ : Optional[int] = new_scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a__ ( self :str ):
pass
def a__ ( self :Any ,_UpperCamelCase :List[str]=0 ,**_UpperCamelCase :List[Any] ):
snake_case_ : Optional[Any] = dict(self.forward_default_kwargs )
snake_case_ : Union[str, Any] = kwargs.pop("""num_inference_steps""" ,_UpperCamelCase )
snake_case_ : str = self.dummy_sample
snake_case_ : Any = 0.1 * sample
snake_case_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : Tuple = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
snake_case_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
snake_case_ : Dict = scheduler_class.from_pretrained(_UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
snake_case_ : str = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case_ : List[Any] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample
snake_case_ : str = new_scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a__ ( self :List[Any] ,_UpperCamelCase :str=None ,**_UpperCamelCase :Dict ):
if scheduler is None:
snake_case_ : Union[str, Any] = self.scheduler_classes[0]
snake_case_ : List[str] = self.get_scheduler_config(**_UpperCamelCase )
snake_case_ : Dict = scheduler_class(**_UpperCamelCase )
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : Optional[int] = self.get_scheduler_config(**_UpperCamelCase )
snake_case_ : List[str] = scheduler_class(**_UpperCamelCase )
snake_case_ : str = 1_0
snake_case_ : int = self.dummy_model()
snake_case_ : Any = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Union[str, Any] = model(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Union[str, Any] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ).prev_sample
return sample
def a__ ( self :Optional[int] ):
snake_case_ : str = dict(self.forward_default_kwargs )
snake_case_ : Dict = kwargs.pop("""num_inference_steps""" ,_UpperCamelCase )
for scheduler_class in self.scheduler_classes:
snake_case_ : List[str] = self.get_scheduler_config()
snake_case_ : int = scheduler_class(**_UpperCamelCase )
snake_case_ : str = self.dummy_sample
snake_case_ : str = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCamelCase ,"""set_timesteps""" ):
scheduler.set_timesteps(_UpperCamelCase )
elif num_inference_steps is not None and not hasattr(_UpperCamelCase ,"""set_timesteps""" ):
snake_case_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
snake_case_ : List[str] = scheduler.timesteps[5]
snake_case_ : str = scheduler.timesteps[6]
snake_case_ : Optional[Any] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample
snake_case_ : Dict = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def a__ ( self :Any ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case_ : Tuple = DEISMultistepScheduler(**self.get_scheduler_config() )
snake_case_ : List[str] = self.full_loop(scheduler=_UpperCamelCase )
snake_case_ : Tuple = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
snake_case_ : Tuple = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case_ : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case_ : str = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case_ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
snake_case_ : Dict = self.full_loop(scheduler=_UpperCamelCase )
snake_case_ : Optional[int] = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
def a__ ( self :Tuple ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def a__ ( self :int ):
self.check_over_configs(thresholding=_UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_UpperCamelCase ,prediction_type=_UpperCamelCase ,sample_max_value=_UpperCamelCase ,algorithm_type="""deis""" ,solver_order=_UpperCamelCase ,solver_type=_UpperCamelCase ,)
def a__ ( self :List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def a__ ( self :Dict ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_UpperCamelCase ,solver_type=_UpperCamelCase ,prediction_type=_UpperCamelCase ,algorithm_type=_UpperCamelCase ,)
snake_case_ : Tuple = self.full_loop(
solver_order=_UpperCamelCase ,solver_type=_UpperCamelCase ,prediction_type=_UpperCamelCase ,algorithm_type=_UpperCamelCase ,)
assert not torch.isnan(_UpperCamelCase ).any(), "Samples have nan numbers"
def a__ ( self :int ):
self.check_over_configs(lower_order_final=_UpperCamelCase )
self.check_over_configs(lower_order_final=_UpperCamelCase )
def a__ ( self :Optional[int] ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=_UpperCamelCase ,time_step=0 )
def a__ ( self :Optional[Any] ):
snake_case_ : Union[str, Any] = self.full_loop()
snake_case_ : Optional[int] = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
def a__ ( self :List[str] ):
snake_case_ : int = self.full_loop(prediction_type="""v_prediction""" )
snake_case_ : Any = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.0_91 ) < 1E-3
def a__ ( self :Union[str, Any] ):
snake_case_ : List[str] = self.scheduler_classes[0]
snake_case_ : Optional[Any] = self.get_scheduler_config(thresholding=_UpperCamelCase ,dynamic_thresholding_ratio=0 )
snake_case_ : Tuple = scheduler_class(**_UpperCamelCase )
snake_case_ : str = 1_0
snake_case_ : str = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case_ : Union[str, Any] = model(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Optional[int] = scheduler.step(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
| 334
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : List[str] = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( __a , unittest.TestCase ):
a__ :Dict = SpeechTaTokenizer
a__ :str = False
a__ :int = True
def A_ (self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase_ : Any = SpeechTaTokenizer(__UpperCamelCase )
UpperCamelCase_ : Optional[int] = AddedToken("""<mask>""" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )
UpperCamelCase_ : Optional[int] = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def A_ (self , __UpperCamelCase ) -> Union[str, Any]:
UpperCamelCase_ : List[str] = """this is a test"""
UpperCamelCase_ : List[str] = """this is a test"""
return input_text, output_text
def A_ (self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=20 , __UpperCamelCase=5 ) -> Optional[Any]:
UpperCamelCase_,UpperCamelCase_ : int = self.get_input_output_texts(__UpperCamelCase )
UpperCamelCase_ : Optional[Any] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
UpperCamelCase_ : Optional[int] = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
return text, ids
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Union[str, Any] = """<pad>"""
UpperCamelCase_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def A_ (self ) -> Optional[int]:
UpperCamelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(__UpperCamelCase ) , 81 )
def A_ (self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def A_ (self ) -> str:
UpperCamelCase_ : int = self.get_tokenizers(do_lower_case=__UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase_ : str = tokenizer.vocab_size
UpperCamelCase_ : List[Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCamelCase_ : Optional[Any] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
UpperCamelCase_ : List[str] = tokenizer.add_tokens(__UpperCamelCase )
UpperCamelCase_ : Dict = tokenizer.vocab_size
UpperCamelCase_ : Union[str, Any] = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , all_size + len(__UpperCamelCase ) )
UpperCamelCase_ : Optional[Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCamelCase_ : str = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
UpperCamelCase_ : Optional[Any] = tokenizer.add_special_tokens(__UpperCamelCase )
UpperCamelCase_ : Union[str, Any] = tokenizer.vocab_size
UpperCamelCase_ : Dict = len(__UpperCamelCase )
self.assertNotEqual(__UpperCamelCase , 0 )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(__UpperCamelCase , len(__UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , all_size_a + len(__UpperCamelCase ) )
UpperCamelCase_ : Union[str, Any] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=__UpperCamelCase )
self.assertGreaterEqual(len(__UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def A_ (self ) -> List[str]:
pass
def A_ (self ) -> int:
pass
def A_ (self ) -> Dict:
UpperCamelCase_ : str = self.get_tokenizer()
UpperCamelCase_ : List[str] = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(__UpperCamelCase , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
UpperCamelCase_ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
UpperCamelCase_ : int = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
# fmt: off
self.assertListEqual(__UpperCamelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
UpperCamelCase_ : Tuple = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def A_ (self ) -> Any:
# Use custom sequence because this tokenizer does not handle numbers.
UpperCamelCase_ : int = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
UpperCamelCase_ : str = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=__UpperCamelCase , )
| 138
|
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover of *graph* via a maximal matching.

    Repeatedly picks an arbitrary remaining edge, adds both endpoints to the
    cover, and discards every edge adjacent to either endpoint (classic
    2-approximation).

    Args:
        graph: Adjacency mapping ``{node: [neighbors, ...]}``.

    Returns:
        The set of chosen cover vertices.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's directed edge tuples
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both extremities to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                # Bug fix: discard the adjacent *edge*; the original
                # discarded the graph argument, leaving edges untouched.
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return every directed edge of *graph* as a set of (from_node, to_node) tuples.

    Args:
        graph: Adjacency mapping ``{node: [neighbors, ...]}``.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
    # Example usage:
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 138
| 1
|
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    """Recursively print all combinations of size *r* drawn from ``arr[0:n]``.

    Args:
        arr: Input elements.
        n: Number of usable elements in *arr*.
        r: Combination size.
        index: Next free slot in *data* (current combination length).
        data: Scratch buffer of length *r* holding the partial combination.
        i: Index of the next candidate element in *arr*.
    """
    # A full combination has been built; print it.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # Current element is included, put next at next location.
    # (Bug fix: the original dropped this write into data[index],
    # so the "include" branch never recorded the element.)
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # Current element is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all combinations of size ``r`` in ``arr`` of size ``n``.

    Thin wrapper that allocates the scratch buffer and delegates to
    ``combination_util``.
    """
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 77
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __UpperCamelCase ( args ):
    """Factory for the ``convert`` CLI sub-command.

    Args:
        args: parsed argparse ``Namespace`` carrying the conversion options.

    Returns:
        A configured convert-command instance (the ``_A`` class below).
    """
    return _A(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
# Error message raised when a conversion needs TensorFlow but it is not installed.
__magic_name__ ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _A ( BaseTransformersCLICommand ):
    """CLI ``convert`` command.

    Converts an original author's (mostly TensorFlow) checkpoint of a
    supported architecture into a Transformers PyTorch checkpoint.
    """

    @staticmethod
    def register_subcommand(parser) -> None:
        """Register the ``convert`` sub-command on the root CLI parser.

        Args:
            parser: the argparse sub-parsers collection to register on.
        """
        train_parser = parser.add_parser(
            "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # NOTE(review): the module-level factory above is named `__UpperCamelCase`
        # and would be name-mangled at class scope, so an equivalent inline
        # factory is used here instead.
        train_parser.set_defaults(
            func=lambda args: _A(
                args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
            )
        )

    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args) -> None:
        """Store the conversion parameters; the work happens in ``run()``."""
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self) -> None:
        """Dispatch to the architecture-specific conversion script.

        Imports are done lazily because most converters require TensorFlow;
        a missing import re-raises with the module-level help message.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__magic_name__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__magic_name__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__magic_name__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                # NOTE(review): module path restored to the standard transformers
                # layout (`models.t5`); the mangled source said `models.ta`.
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(__magic_name__)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__magic_name__)
            if "ckpt" in self._tf_checkpoint.lower():
                # A raw TF checkpoint was given; there is no dataset file.
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                # Otherwise the path points at a pre-processed dataset file.
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                # NOTE(review): module path restored to `models.gpt2` (mangled
                # source said `models.gpta`).
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__magic_name__)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(__magic_name__)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
| 415
| 0
|
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Google the command-line arguments and open the first few result links.
    print('Googling.....')
    url = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    # Dump the raw page so the CSS class of result links can be inspected.
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    # '.eZt8xd' is the CSS class Google uses for result anchors; keep 5 links.
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get("href")}""")
| 720
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( BackboneConfigMixin , PretrainedConfig ):
    """Configuration for the MaskFormer Swin backbone.

    Mirrors the Swin Transformer configuration and adds the backbone
    feature-selection machinery from ``BackboneConfigMixin``.
    """

    # HF model identifier for this configuration type.
    model_type = 'maskformer-swin'
    # Map standard config attribute names onto Swin-specific ones.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(depths) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 80
| 0
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of a diamond: ``n`` rows of right-aligned stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n):
    """Print the lower half of a diamond: rows shrinking from ``n`` stars to 1.

    Note: the row of ``i`` stars is followed by the indentation for the NEXT
    row (printed without a trailing newline), mirroring ``floyd``'s layout.
    """
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars (i per row, not n)
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    """Print a full diamond of ``n`` rows; warn and return on non-positive n."""
    if n <= 0:
        print(''' ... .... nothing printing :(''')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    # Banner, then loop printing diamonds until the user enters 0.
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 100
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a ``MaskFormerConfig`` matching the named original checkpoint.

    Sets the label count and id2label mapping based on the dataset encoded
    in ``model_name``. Downloads the label file from the HF hub.
    """
    # All of these checkpoints use a Swin-Tiny backbone.
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ) , '''r''' ) )
    # JSON keys are strings; the config expects integer class ids.
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    return config
def create_rename_keys(config):
    """Return the (original_key, hf_key) pairs mapping the original
    MaskFormer state dict onto the Transformers parameter names."""
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
    rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
    rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
    rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        # only the first three stages have a patch-merging downsample layer
        if i < 3:
            rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
        rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
        rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
    # FPN
    rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
    rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
    rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
        rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
        rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
        rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
        rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
        rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
    rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
    rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
        # cross-attention out projection
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
        # MLP 1
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
        # MLP 2
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
        # layernorm 3 (final layernorm)
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
    rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
    # heads on top
    rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
    rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
    rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
    for i in range(3 ):
        rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
        rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (the old key is removed)."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin ``qkv`` projection into separate q/k/v tensors.

    Mutates ``state_dict`` in place: pops the original ``attn.qkv.*`` keys and
    writes ``query``/``key``/``value`` weights and biases under the HF names.
    """
    # per-stage channel dimension: embed_dim doubles at every stage
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(config, state_dict):
    """Split the decoder's fused ``in_proj`` matrices into q/k/v projections.

    Handles both self-attention and cross-attention (``multihead_attn`` →
    ``encoder_attn``) of every decoder layer. Mutates ``state_dict`` in place.
    """
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    """Download the standard COCO cats image used to verify converted models.

    Returns:
        A PIL ``Image`` (note: NOT a tensor; the original annotation was wrong).
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False):
    """Convert an original MaskFormer pickle checkpoint to a HF checkpoint.

    Args:
        model_name: name of the checkpoint (selects config + verification).
        checkpoint_path: path to the original pickled state dict (.pkl).
        pytorch_dump_folder_path: optional output directory for the HF model.
        push_to_hub: whether to push the converted model to the HF hub.
    """
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, '''rb''' ) as f:
        data = pickle.load(f)
    state_dict = data['''model''']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(config, state_dict)
    # update to torch tensors (only replaces values of existing keys, so the
    # dict size does not change while iterating)
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape )
    # the final encoder layernorm is unused by MaskFormer: its absence is expected
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
    inputs = image_processor(image, return_tensors='''pt''' )
    outputs = model(**inputs )
    print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
    parser.add_argument(
        """--checkpoint_path""",
        default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
        type=str,
        help="""Path to the original state dict (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 100
| 1
|
'''simple docstring'''
from functools import reduce
# The 1000-digit number from Project Euler problem 8, stored as a string so
# that 13-digit windows can be sliced and multiplied digit by digit.
a = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)
def solution(digits=None) -> int:
    """Return the greatest product of thirteen adjacent digits (Euler #8).

    Args:
        digits: digit string to scan; defaults to the module-level 1000-digit
            constant ``a``.

    Returns:
        The maximum product over all 13-digit windows.
    """
    if digits is None:
        digits = a
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), digits[i : i + 13]))
        for i in range(len(digits) - 12)
    )


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 718
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Helper that generates image-processor kwargs and fake image batches
    for the ChineseCLIP image-processing tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073],
        image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711],
        do_convert_rgb=True,
    ):
        # Default sizes match the ChineseCLIP processor defaults.
        size = size if size is not None else {"""height""": 224, """width""": 224}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL (default), numpy or torch.

        Images are channels-first uint8 arrays; PIL output moves the channel
        dimension last.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
    """Test suite for the ChineseCLIP image processor on 3-channel RGB inputs.

    NOTE(review): ``_snake_case`` is undefined in this file — presumably the
    ``ImageProcessingSavingTestMixin`` imported above; confirm.  The method
    name ``UpperCAmelCase__`` is reused many times below, so earlier test
    definitions are shadowed by later ones and only the last is discoverable
    by unittest.  Results of ``image_processing(...)`` are bound to the local
    ``__SCREAMING_SNAKE_CASE`` throughout; the follow-up assertions reference
    ``encoded_images``/``image_processing`` which are never bound — garbled
    transcription, verify against the upstream test file.
    """

    # Processor class under test; None when vision dependencies are missing.
    __UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Any ):
        '''Build the tester helper (NOTE(review): ``ChineseCLIPImageProcessingTester``
        and ``lowerCamelCase`` are undefined at this point — confirm intended names).'''
        __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )

    @property
    def UpperCAmelCase__ ( self : Optional[int] ):
        '''Processor kwargs produced by the tester helper.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : Tuple ):
        '''Check the processor exposes the expected config attributes.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )

    def UpperCAmelCase__ ( self : List[Any] ):
        '''Config round-trip: defaults, then overrides via ``from_dict`` kwargs.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
        # An int size becomes a shortest-edge spec; an int crop_size becomes square.
        __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )

    def UpperCAmelCase__ ( self : Dict ):
        '''Intentionally skipped (placeholder).'''
        pass

    def UpperCAmelCase__ ( self : List[str] ):
        '''PIL inputs: single image and batch both produce cropped pixel_values.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase ,Image.Image )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCAmelCase__ ( self : Optional[int] ):
        '''NumPy inputs: same shape checks as the PIL variant.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase ,np.ndarray )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)

    def UpperCAmelCase__ ( self : str ):
        '''Torch tensor inputs: same shape checks as the PIL variant.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase ,torch.Tensor )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
    """Test suite for ChineseCLIP image processing on 4-channel inputs.

    Inputs carry 4 channels but ``do_convert_rgb`` should collapse them to 3
    in the encoded output (the ``= 3`` assignment below presumably sets
    ``expected_encoded_image_num_channels`` — garbled target name, confirm).
    NOTE(review): as in the class above, ``_snake_case`` and
    ``ChineseCLIPImageProcessingTester`` are undefined here, duplicate
    ``UpperCAmelCase__`` method names shadow each other, and locals are bound
    to ``__SCREAMING_SNAKE_CASE`` while later lines read other names.
    """

    # Processor class under test; None when vision dependencies are missing.
    __UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Tuple ):
        '''Build the tester helper with 4 input channels.'''
        __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
        # Expected number of output channels after RGB conversion.
        __SCREAMING_SNAKE_CASE = 3

    @property
    def UpperCAmelCase__ ( self : Dict ):
        '''Processor kwargs produced by the tester helper.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : int ):
        '''Check the processor exposes the expected config attributes.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
        self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )

    def UpperCAmelCase__ ( self : Tuple ):
        '''Intentionally skipped (placeholder).'''
        pass

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        '''PIL inputs: encoded output should have 3 channels despite 4-channel input.'''
        __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase ,Image.Image )
        # Test not batched input
        __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
        # Test batched
        __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) ,)
# ---- stray concatenation artifact ("| 13 | 0 |") neutralized as a comment ----
from ..utils import DummyObject, requires_backends


# Placeholder ("dummy") classes that raise an informative error through
# ``requires_backends`` whenever they are instantiated or constructed via a
# classmethod while Flax is not installed.
# NOTE(review): every class below is named ``_UpperCamelCase`` and the
# metaclass ``A`` is undefined in this file (presumably ``DummyObject``
# imported above).  Because the class names collide, each definition shadows
# the previous one and only the final binding survives at runtime; likewise
# the two ``_snake_case`` classmethods inside each class shadow one another.
# The upstream dummy-objects file gives each class a distinct pipeline name —
# confirm and restore those names before relying on this module.
class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Tuple = ["flax"]

    def __init__( self : Any , *_lowerCamelCase : Dict , **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Optional[Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : str ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Optional[int] = ["flax"]

    def __init__( self : Any , *_lowerCamelCase : Tuple , **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Any , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[str] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : str , *_lowerCamelCase : str , **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Tuple = ["flax"]

    def __init__( self : Dict , *_lowerCamelCase : int , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : int , *_lowerCamelCase : Dict , **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : List[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Optional[int] = ["flax"]

    def __init__( self : List[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Any , *_lowerCamelCase : Any , **_lowerCamelCase : str ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : List[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : List[str] = ["flax"]

    def __init__( self : List[Any] , *_lowerCamelCase : str , **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : List[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Dict ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Optional[Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Union[str, Any] = ["flax"]

    def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Optional[int] , *_lowerCamelCase : int , **_lowerCamelCase : str ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Optional[int] , *_lowerCamelCase : Dict , **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Optional[Any] = ["flax"]

    def __init__( self : int , *_lowerCamelCase : str , **_lowerCamelCase : str ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : List[Any] , *_lowerCamelCase : str , **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : List[Any] = ["flax"]

    def __init__( self : Dict , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : int , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Any ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Union[str, Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : str = ["flax"]

    def __init__( self : Union[str, Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Optional[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Tuple ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Union[str, Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Tuple = ["flax"]

    def __init__( self : int , *_lowerCamelCase : List[Any] , **_lowerCamelCase : int ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Dict , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : str , *_lowerCamelCase : int , **_lowerCamelCase : Dict ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : List[Any] = ["flax"]

    def __init__( self : str , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : List[str] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Dict , *_lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : Dict = ["flax"]

    def __init__( self : Optional[int] , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Dict , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )


class _UpperCamelCase ( metaclass=A ):
    '''simple docstring'''
    a_ : int = ["flax"]

    def __init__( self : Optional[int] , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ):
        '''simple docstring'''
        requires_backends(self , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : List[str] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : List[str] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )

    @classmethod
    def _snake_case ( cls : Any , *_lowerCamelCase : int , **_lowerCamelCase : List[Any] ):
        '''simple docstring'''
        requires_backends(cls , ["""flax"""] )
# ---- stray concatenation artifact ("| 519 |") neutralized as a comment ----
import string

# Relative letter frequencies (percent) for English text, taken from
# https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# NOTE(review): in the original, the dict above and both string constants were
# all bound to one garbled name, and all four functions below shared a single
# name — every earlier binding was shadowed and every cross-call raised
# NameError.  The inner loop also appended the whole *message* instead of the
# letter.  Restored to the canonical frequency-analysis implementation.


def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase English letter in *message*."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """Sort key: the first element of a (frequency, letters) pair."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in *message*.

    Ties within one frequency are broken by reverse ETAOIN order.

    >>> get_frequency_order('Hello World')
    'LOWDRHEZQXJKVBPYGFMUCSNIAT'
    """
    letter_to_freq = get_letter_count(message)
    # Invert the mapping: frequency -> all letters occurring that often.
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score (0-12) how closely *message*'s letter frequencies match English.

    One point for each of English's six most common letters found among the
    message's six most common, plus one for each of the six least common.

    >>> english_freq_match_score('Hello World')
    1
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ---- stray concatenation artifact ("| 519 | 1 |") neutralized as a comment ----
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowercase_(cp: int) -> bool:
    """Return True if codepoint *cp* lies in a CJK ideograph Unicode block.

    Covers CJK Unified Ideographs, Extensions A-E and the CJK Compatibility
    Ideographs blocks.  NOTE(review): the original took a parameter named
    ``__snake_case`` but tested an undefined name ``cp`` — a guaranteed
    NameError; the parameter is now actually used.
    """
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # Compatibility Supplement
    ):
        return True
    return False
def lowercase_ ( __snake_case : List[Any] ) -> Dict:
    """Intended: return 1 when every character of the word is CJK, else 0.

    NOTE(review): ``word``, ``lowerCAmelCase_`` and ``_is_chinese_char`` are
    all undefined under these names in this file — the identifiers appear
    garbled, so as written this raises NameError.  Confirm against the
    upstream ``prepare_chinese_ref`` script.
    """
    for char in word:
        snake_case__ :Tuple = ord(lowerCAmelCase_ )
        if not _is_chinese_char(lowerCAmelCase_ ):
            return 0
    return 1
def lowercase_ ( __snake_case : Any ) -> Dict:
    """Intended: collect the multi-character Chinese words from a token list.

    NOTE(review): ``tokens``, ``lowerCAmelCase_``, ``is_chinese``,
    ``chinese_word``, ``word_set`` and ``word_list`` are not defined under
    these names here — garbled identifiers; verify against upstream.
    """
    snake_case__ :Tuple = set()
    for token in tokens:
        # A "Chinese word" is a token longer than one char made only of CJK chars.
        snake_case__ :Tuple = len(lowerCAmelCase_ ) > 1 and is_chinese(lowerCAmelCase_ )
        if chinese_word:
            word_set.add(lowerCAmelCase_ )
    snake_case__ :Any = list(lowerCAmelCase_ )
    return word_list
def lowercase_ ( __snake_case : List[str] , __snake_case : Dict ) -> Union[str, Any]:
    """Intended: prefix BERT sub-tokens of whole Chinese words with ``##``.

    Greedy longest-match over ``chinese_word_set``: wherever a run of
    single-char BERT tokens spells a known word, all but the first get the
    ``##`` WordPiece continuation marker.
    NOTE(review): the parameter name ``__snake_case`` is repeated (a
    SyntaxError) and the body reads names (``bert_tokens``, ``start``,
    ``end``, ``lowerCAmelCase_`` …) that are never bound — garbled
    transcription; verify against upstream.
    """
    if not chinese_word_set:
        return bert_tokens
    snake_case__ :str = max([len(lowerCAmelCase_ ) for w in chinese_word_set] )
    snake_case__ :Tuple = bert_tokens
    snake_case__ :Dict = 0, len(lowerCAmelCase_ )
    while start < end:
        snake_case__ :Any = True
        if is_chinese(bert_word[start] ):
            # Try the longest candidate word first, shrinking down to 2 chars.
            snake_case__ :int = min(end - start , lowerCAmelCase_ )
            for i in range(lowerCAmelCase_ , 1 , -1 ):
                snake_case__ :int = ''''''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        snake_case__ :Optional[int] = '''##''' + bert_word[j]
                    snake_case__ :Dict = start + i
                    snake_case__ :Dict = False
                    break
        if single_word:
            start += 1
    return bert_word
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] ) -> Tuple:
    """Intended: build whole-word-masking reference ids for each line.

    Segments lines with LTP (batches of 100), tokenizes with BERT, then marks
    the positions of ``##``-continued single CJK sub-tokens.
    NOTE(review): the parameter name ``__snake_case`` is repeated (a
    SyntaxError) and the body references many unbound names
    (``ltp_tokenizer``, ``bert_tokenizer``, ``lines``, ``lowerCAmelCase_`` …)
    — garbled transcription; verify against upstream before use.
    """
    snake_case__ :List[Any] = []
    for i in range(0 , len(lowerCAmelCase_ ) , 1_00 ):
        snake_case__ :Any = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["cws"] ).cws
        snake_case__ :List[Any] = [get_chinese_word(lowerCAmelCase_ ) for r in res]
        ltp_res.extend(lowerCAmelCase_ )
    assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
    snake_case__ :Dict = []
    for i in range(0 , len(lowerCAmelCase_ ) , 1_00 ):
        snake_case__ :Dict = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=5_12 )
        bert_res.extend(res["input_ids"] )
    assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
    snake_case__ :Optional[Any] = []
    for input_ids, chinese_word in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
        snake_case__ :Dict = []
        for id in input_ids:
            snake_case__ :Optional[int] = bert_tokenizer._convert_id_to_token(lowerCAmelCase_ )
            input_tokens.append(lowerCAmelCase_ )
        snake_case__ :Dict = add_sub_symbol(lowerCAmelCase_ , lowerCAmelCase_ )
        snake_case__ :Tuple = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(lowerCAmelCase_ ):
            if token[:2] == "##":
                snake_case__ :Tuple = token[2:]
                # save chinese tokens' pos
                if len(lowerCAmelCase_ ) == 1 and _is_chinese_char(ord(lowerCAmelCase_ ) ):
                    ref_id.append(lowerCAmelCase_ )
        ref_ids.append(lowerCAmelCase_ )
    assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
    return ref_ids
def lowercase_ ( __snake_case : Union[str, Any] ) -> Optional[int]:
    """Intended: CLI entry point — read input lines, compute reference ids,
    dump them to ``args.save_path`` as one JSON list per line.

    NOTE(review): the body reads ``args``, ``data``, ``lowerCAmelCase_``,
    ``LTP``-related locals and ``prepare_ref`` under names never bound in this
    file — garbled transcription; verify against upstream.
    """
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        snake_case__ :int = f.readlines()
    snake_case__ :int = [line.strip() for line in data if len(lowerCAmelCase_ ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    snake_case__ :Union[str, Any] = LTP(args.ltp )  # faster in GPU device
    snake_case__ :Any = BertTokenizer.from_pretrained(args.bert )
    snake_case__ :Tuple = prepare_ref(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        snake_case__ :Dict = [json.dumps(lowerCAmelCase_ ) + '''\n''' for ref in ref_ids]
        f.writelines(lowerCAmelCase_ )
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
main(args)
# ---- stray concatenation artifact ("| 703 |") neutralized as a comment ----
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
    """Factory for the ``add-new-model`` CLI command.

    NOTE(review): returns ``AddNewModelCommand`` and reads ``args`` — neither
    is defined under those names in this file (the command class below is
    named ``_snake_case`` and the parameter ``__snake_case``); garbled
    transcription, verify against upstream.
    """
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
    """CLI command that scaffolds a new model from the cookiecutter template.

    Deprecated in favor of ``transformers-cli add-new-model-like``.
    NOTE(review): the base class ``_A`` is undefined in this file — presumably
    ``BaseTransformersCLICommand`` imported above.  Throughout the bodies,
    results are bound to the garbled local ``snake_case__`` while later lines
    read descriptive names (``directory``, ``model_dir``,
    ``path_to_transformer_root`` …), so the code is internally inconsistent —
    treat this transcription as needing verification against upstream.
    """

    @staticmethod
    def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
        # Register the `add-new-model` sub-parser and its flags.
        snake_case__ :Dict = parser.add_parser("add-new-model" )
        add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
        add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
        add_new_model_parser.add_argument(
            "--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=UpperCamelCase )

    def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
        # Store CLI flags; attributes are read below as _testing/_testing_file/_path.
        snake_case__ :Union[str, Any] = testing
        snake_case__ :Union[str, Any] = testing_file
        snake_case__ :List[str] = path

    def lowerCAmelCase_ ( self ) -> List[Any]:
        """Run the scaffold: drive cookiecutter, then move the generated files
        into the transformers source tree and splice in the template snippets."""
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead." )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(UpperCamelCase ) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory." )
        # Locate the repo root (or derive it from an explicitly provided path).
        snake_case__ :str = (
            Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(UpperCamelCase ) )
        else:
            # In testing mode, answers come from a JSON file instead of prompts.
            with open(self._testing_file ,"r" ) as configuration_file:
                snake_case__ :str = json.load(UpperCamelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
        snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json" ,"r" ) as configuration_file:
            snake_case__ :Dict = json.load(UpperCamelCase )
        snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
        snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json' )
        # Which frameworks were requested in the cookiecutter answers.
        snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
        snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
            pass
        shutil.move(
            f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)

        def remove_copy_lines(UpperCamelCase ):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(UpperCamelCase ,"r" ) as f:
                snake_case__ :List[str] = f.readlines()
            with open(UpperCamelCase ,"w" ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(UpperCamelCase )

        # Per-framework: keep and relocate the generated files, or delete them.
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
                shutil.move(
                    f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
                shutil.move(
                    f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
            else:
                os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
                os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
                shutil.move(
                    f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
                shutil.move(
                    f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
            else:
                os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
                os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
                shutil.move(
                    f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
                shutil.move(
                    f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
            else:
                os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
                os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
        shutil.move(
            f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
            # Insert `lines_to_copy` right below the marker line inside a file,
            # writing through a temp file and preserving permissions.
            # Create temp file
            snake_case__ , snake_case__ :Optional[Any] = mkstemp()
            snake_case__ :Optional[Any] = False
            with fdopen(UpperCamelCase ,"w" ) as new_file:
                with open(UpperCamelCase ) as old_file:
                    for line in old_file:
                        new_file.write(UpperCamelCase )
                        if line_to_copy_below in line:
                            snake_case__ :Optional[Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(UpperCamelCase )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(UpperCamelCase ,UpperCamelCase )
            # Remove original file
            remove(UpperCamelCase )
            # Move new file
            move(UpperCamelCase ,UpperCamelCase )

        def skip_units(UpperCamelCase ):
            # A template section is skipped when its framework was not requested.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(UpperCamelCase ):
            # Parse a to_replace_*.py template and splice its snippets into the
            # target files named by the "# To replace in:" directives.
            with open(UpperCamelCase ) as datafile:
                snake_case__ :int = []
                snake_case__ :Optional[int] = False
                snake_case__ :List[str] = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :Tuple = skip_units(UpperCamelCase )
                    elif "# Below: " in line and "##" not in line:
                        snake_case__ :Optional[Any] = line.split("\"" )[1]
                        snake_case__ :List[str] = skip_units(UpperCamelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
                        snake_case__ :Tuple = []
                    elif "# Replace with" in line and "##" not in line:
                        snake_case__ :Optional[Any] = []
                    elif "##" not in line:
                        lines_to_copy.append(UpperCamelCase )
            remove(UpperCamelCase )

        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(UpperCamelCase )
# ---- stray concatenation artifact ("| 57 | 0 |") neutralized as a comment ----
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public names it provides.
# NOTE(review): the original bound the dict, the torch-only name list and the
# _LazyModule result all to one garbled name, so `_import_structure` was
# undefined at the _LazyModule call and the lazy module was never installed in
# sys.modules.  Restored to the canonical transformers __init__ pattern.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch absent: the modeling objects simply are not registered.
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime these stay lazy.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 619
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( _a ):
    """Composite encoder-decoder model configuration.

    Holds one sub-config for the encoder and one for the decoder; both are
    rebuilt with ``AutoConfig.for_model`` from the dicts passed via kwargs.
    NOTE(review): the two methods below share one (obfuscated) name, so only
    the last binding survives on the class — confirm the intended names
    (presumably ``from_encoder_decoder_configs`` and ``to_dict``).
    """

    # Fix: both class attributes were bound to the same name, silently losing
    # the first; restored to the names the config machinery reads.
    model_type = """encoder-decoder"""
    is_composition = True

    def __init__( self , **kwargs ):
        """Build from ``encoder`` and ``decoder`` config dicts in *kwargs*."""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )

        from ..auto.configuration_auto import AutoConfig

        # Fix: the sub-configs were assigned to a throwaway name, so the
        # attributes read elsewhere (self.encoder / self.decoder) never existed.
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def _lowerCamelCase ( cls , encoder_config , decoder_config , **kwargs ):
        """Instantiate from two config objects, forcing decoder cross-attention.

        (Fix: all three parameters previously shared one name — a SyntaxError.)
        """
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def _lowerCamelCase ( self ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 619
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure. Fix: dict was bound to a throwaway name while
# ``_import_structure`` (undefined) was passed to _LazyModule, and the
# torch-only list overwrote the dict.
_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module proxy (``sys`` was imported but unused before).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 484
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure. Fix: dict was bound to a throwaway name while
# ``_import_structure`` (undefined) was passed to _LazyModule, and the
# torch-only list overwrote the dict.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module proxy (``sys`` was imported but unused before).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 484
| 1
|
'''simple docstring'''
import string
from math import logaa
def __UpperCamelCase ( term : str, document : str ):
    """Return how many times *term* occurs in *document*, case-insensitively.

    Punctuation and newlines are stripped before whitespace tokenization.
    (Fix: both parameters previously shared one name — a SyntaxError — and
    the body read names that were never bound.)
    """
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation ) ).replace('\n', '' )
    tokenize_document = document_without_punctuation.split(' ' )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __UpperCamelCase ( term : str, corpus : str ):
    """Return (documents containing *term*, total documents) for a corpus.

    Documents are newline-separated; matching is case-insensitive with
    punctuation ignored. Note: membership is a substring test, not a
    whole-word match. (Fix: duplicate parameter names and the final
    ``len`` argument, which must be the document list.)
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n' )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def __UpperCamelCase ( df : int, n : int, smoothing : bool = False ):
    """Return the inverse document frequency round(log10(n / df), 3).

    With *smoothing*, returns round(1 + log10(n / (1 + df)), 3) so df == 0
    is safe. Raises ZeroDivisionError for df == 0 (unsmoothed) and
    ValueError for n == 0. (Fix: all three parameters shared one name.)
    """
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.' )
        return round(1 + logaa(n / (1 + df) ), 3 )
    if df == 0:
        raise ZeroDivisionError('df must be > 0' )
    elif n == 0:
        raise ValueError('log10(0) is undefined.' )
    return round(logaa(n / df ), 3 )
def __UpperCamelCase ( tf : int, idf : int ):
    """Return the tf-idf score, round(tf * idf, 3).

    (Fix: both parameters previously shared one name — a SyntaxError.)
    """
    return round(tf * idf, 3 )
| 119
|
'''simple docstring'''
def __UpperCamelCase ( upper_limit : int ):
    """Return the Catalan numbers C(0)..C(upper_limit) as a list.

    Uses the O(n^2) DP recurrence C(i) = sum_{j<i} C(j) * C(i-j-1).
    Raises ValueError for a negative limit. (Fix: the parameter was bound
    under a placeholder name, the base-case writes were discarded, and the
    inner loop iterated over the wrong bound.)
    """
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
# Interactive driver: repeatedly reads an upper limit and prints the Catalan
# numbers up to it; non-numeric input or EOF ends the loop.
if __name__ == "__main__":
    print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
    print('''\n*** Enter -1 at any time to quit ***''')
    print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
    try:
        while True:
            UpperCAmelCase = int(input().strip())
            # NOTE(review): the input is bound to ``UpperCAmelCase`` but read
            # back as ``N``, and ``catalan_numbers`` is not defined under that
            # name in this module — confirm the intended identifiers.
            if N < 0:
                print('''\n********* Goodbye!! ************''')
                break
            else:
                print(F'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('''Try another upper limit for the sequence: ''', end='''''')
    except (NameError, ValueError):
        print('''\n********* Invalid input, goodbye! ************\n''')

    import doctest

    doctest.testmod()
| 119
| 1
|
"""simple docstring"""
def lowercase_ ( collection: list ) -> list:
    """Stable counting sort; returns a new sorted list (negatives allowed).

    O(n + k) where k is the value range. (Fix: the parameter and every local
    were bound under placeholder names while the body read the real names,
    the prefix-sum loop used the wrong bound, and the output writes were
    discarded.)
    """
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def lowercase_ ( string: str ) -> str:
    """Sort the characters of *string* via counting sort on code points.

    (Fix: both comprehension variables were clobbered by a placeholder name.)
    NOTE(review): this calls ``counting_sort``, which is not defined under
    that name in this module (the sibling sort is also named ``lowercase_``)
    — confirm the intended target.
    """
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): ``counting_sort_string`` and ``counting_sort`` are not
    # defined under those names in this module (both defs are ``lowercase_``)
    # — confirm the intended identifiers.
    assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"

    __A = input('''Enter numbers separated by a comma:\n''').strip()
    # NOTE(review): the raw input is bound to ``__A`` but read as
    # ``user_input``; the parsed list likewise is read as ``unsorted``.
    __A = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure. Fix: the dict and all five optional name lists were
# bound to one throwaway name (each overwriting the last) while the undefined
# ``_import_structure`` was passed to _LazyModule.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module proxy (``sys`` was imported but unused before).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366
| 0
|
'''simple docstring'''
import math
def _UpperCamelCase ( array: list , start: int = 0 , end: int = 0 ) -> list:
    """In-place insertion sort of array[start:end]; returns the array.

    ``end == 0`` means "to the end of the array". (Fix: the three parameters
    shared one name — a SyntaxError — and every shift/write was discarded
    into a throwaway name.)
    """
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def _UpperCamelCase ( array: list , index: int , heap_size: int ) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at *index* is a max-heap.

    (Fix: duplicate parameter names, discarded locals, and recursion that
    targeted an undefined name ``heapify`` — it now recurses via this
    function's own name.)
    """
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        _UpperCamelCase(array , largest , heap_size )
def _UpperCamelCase ( array: list ) -> list:
    """In-place heapsort; returns the array sorted ascending.

    (Fix: the original called an undefined name ``heapify`` — the sift-down
    step is inlined here as a private helper so the function is self-contained.)
    """

    def _sift_down(arr: list , index: int , heap_size: int ) -> None:
        # Restore the max-heap property for the subtree rooted at *index*.
        largest = index
        left_index = 2 * index + 1
        right_index = 2 * index + 2
        if left_index < heap_size and arr[largest] < arr[left_index]:
            largest = left_index
        if right_index < heap_size and arr[largest] < arr[right_index]:
            largest = right_index
        if largest != index:
            arr[largest], arr[index] = arr[index], arr[largest]
            _sift_down(arr , largest , heap_size )

    n = len(array )
    # Build a max-heap bottom-up.
    for i in range(n // 2 , -1 , -1 ):
        _sift_down(array , i , n )
    # Repeatedly move the current max to the end and re-heapify the prefix.
    for i in range(n - 1 , 0 , -1 ):
        array[0], array[i] = array[i], array[0]
        _sift_down(array , 0 , i )
    return array
def _UpperCamelCase ( array: list , first_index: int , middle_index: int , last_index: int ) -> int:
    """Return the median of the three indexed values (introsort pivot choice).

    (Fix: all four parameters previously shared one name — a SyntaxError.)
    """
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def _UpperCamelCase ( array: list , low: int , high: int , pivot: int ) -> int:
    """Hoare-style partition of array[low:high] around *pivot*; returns the
    split index.

    (Fix: duplicate parameter names, and the cursor/swap assignments were
    discarded into throwaway names.)
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def _UpperCamelCase ( lowerCAmelCase__: list ) -> list:
    """Introsort entry point: quicksort with a heapsort fallback and insertion
    sort for small ranges."""
    if len(lowerCAmelCase__ ) == 0:
        # NOTE(review): ``array`` is not defined in this scope — presumably
        # the parameter was meant; confirm.
        return array
    # Depth limit 2*ceil(log2(n)): past it, introsort switches to heapsort.
    SCREAMING_SNAKE_CASE_ = 2 * math.ceil(math.loga(len(lowerCAmelCase__ ) ) )
    SCREAMING_SNAKE_CASE_ = 16
    # NOTE(review): both locals above share one throwaway name (the depth
    # limit is lost), and ``intro_sort`` is not defined under that name in
    # this module — confirm the intended identifiers.
    return intro_sort(lowerCAmelCase__ ,0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ,lowerCAmelCase__ )
# NOTE(review): all five parameters share one (obfuscated) name — a
# SyntaxError as written; the intended signature is presumably
# (array, start, end, size_threshold, max_depth). The body also reads
# ``end``/``start``/``max_depth``/``size_threshold``/``p`` and calls
# ``heap_sort``/``median_of_a``/``partition``/``intro_sort``/``insertion_sort``,
# none of which are defined under those names here — confirm targets.
def _UpperCamelCase ( lowerCAmelCase__: list ,lowerCAmelCase__: int ,lowerCAmelCase__: int ,lowerCAmelCase__: int ,lowerCAmelCase__: int ) -> list:
    """Recursive introsort body: quicksort until the depth budget is spent,
    then heapsort; insertion sort once the range is small."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(lowerCAmelCase__ )
        max_depth -= 1
        SCREAMING_SNAKE_CASE_ = median_of_a(lowerCAmelCase__ ,lowerCAmelCase__ ,start + ((end - start) // 2) + 1 ,end - 1 )
        SCREAMING_SNAKE_CASE_ = partition(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        # Recurse into the right half, then loop on the left half.
        intro_sort(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE_ = p
    return insertion_sort(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    SCREAMING_SNAKE_CASE : Any = input("Enter numbers separated by a comma : ").strip()
    # NOTE(review): both inputs are bound to the same throwaway name while
    # ``user_input``/``unsorted``/``sort`` are read — confirm intended names.
    SCREAMING_SNAKE_CASE : Optional[Any] = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 294
|
'''simple docstring'''
def _UpperCamelCase ( power: int = 1000 ) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    (Fix: the parameter was bound under a placeholder name while the body
    read ``power``, and intermediate values were discarded.)
    """
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE : Tuple = int(input("Enter the power of 2: ").strip())
    # NOTE(review): values are bound to one throwaway name while ``power``/
    # ``result`` are read, and ``solution`` is not defined under that name in
    # this module — confirm the intended identifiers.
    print("2 ^ ", power, " = ", 2**power)
    SCREAMING_SNAKE_CASE : str = solution(power)
    print("Sum of the digits is: ", result)
| 294
| 1
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowerCAmelCase :List[Any] = get_logger(__name__)
class _a:
    """Manage extraction of downloaded archives into a shared cache directory.

    Fixes: ``extract_dir``/``extractor`` were assigned to throwaway locals
    (while the methods read ``self.extract_dir``/``self.extractor``); one
    method had duplicate parameter names (SyntaxError); the two private
    helpers are named as the call sites below reference them.
    """

    def __init__( self , cache_dir = None ) -> Any:
        # Cache root for extracted archives; library default when no cache_dir.
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path( self , path ) -> str:
        """Return the cache path for *path* (hash of its absolute path)."""
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )

    def _do_extract( self , output_path , force_extract ) -> bool:
        """Whether extraction should run: forced, or output missing/empty."""
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )

    def lowercase ( self , input_path , force_extract = False ) -> str:
        """Extract *input_path* when it is a recognized archive; return the
        extracted path (the input unchanged when it is not an archive)."""
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class _a( __A ):
    """Abstract extractor interface: a capability test plus an extract action.

    Method names follow the call sites used elsewhere in this module
    (``extractor.is_extractable`` / ``extractor.extract``); the original two
    declarations collided on one name and duplicated parameter names
    (a SyntaxError for ``def f(x, **x)``).
    """

    @classmethod
    @abstractmethod
    def is_extractable ( cls , path , **kwargs ) -> bool:
        """Return True when *path* looks extractable by this extractor."""
        ...

    @staticmethod
    @abstractmethod
    def extract ( input_path , output_path ) -> None:
        """Extract *input_path* into *output_path*."""
        ...
class _a( __A , __A ):
    """Extractor recognized by a leading magic-number byte string.

    NOTE(review): both base classes are the same (obfuscated) name, which is
    a TypeError at class-creation time — confirm the intended bases.
    Fixes: the magic list was bound to a throwaway name while the code reads
    ``cls.magic_numbers``; the classmethod had duplicate parameter names; the
    reader is named ``read_magic_number`` as its call sites reference it.
    """

    # Candidate magic numbers; subclasses override.
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number ( path , magic_number_length ) -> Union[str, Any]:
        """Return the first *magic_number_length* bytes of *path*."""
        with open(path , "rb" ) as f:
            return f.read(magic_number_length )

    @classmethod
    def is_extractable ( cls , path , magic_number = b"" ) -> bool:
        """True when *path* starts with any of this class's magic numbers."""
        if not magic_number:
            max_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , max_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
class _a( __A ):
    """tar archive extractor with path-traversal protection.

    Fixes: duplicate parameter names (SyntaxError), locals discarded into
    throwaway names, and ``exist_ok`` bound to a path instead of True.
    """

    @classmethod
    def is_extractable ( cls , path , **kwargs ) -> bool:
        return tarfile.is_tarfile(path )

    @staticmethod
    def safemembers ( members , output_path ) -> Tuple:
        """Yield only members that stay inside *output_path*; blocked members
        are logged (guards against tar path-traversal)."""

        def resolved(path ) -> str:
            return os.path.realpath(os.path.abspath(path ) )

        def badpath(path , base ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )

        def badlink(info , base ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )

        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
            else:
                yield finfo

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        os.makedirs(output_path , exist_ok=True )
        # NOTE(review): ``TarExtractor`` is not defined under that name in this
        # obfuscated module (the class is ``_a``) — confirm intended reference.
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class _a( __A ):
    """gzip single-file extractor.

    Fix: the magic list was bound to a throwaway name rather than the
    ``magic_numbers`` attribute the base class reads; the action is named
    ``extract`` as the dispatcher calls it.
    """

    magic_numbers = [B'\x1F\x8B']

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        with gzip.open(input_path , "rb" ) as gzip_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class _a( __A ):
    """zip archive extractor; detects by magic number and, as a fallback, by
    the archive's central-directory structure.

    Fixes: magic list bound to a throwaway name, duplicate parameter names,
    and the endrec/data/centdir values discarded into throwaway names.
    """

    magic_numbers = [
        B'PK\x03\x04',
        B'PK\x05\x06',  # empty archive
        B'PK\x07\x08',  # spanned archive
    ]

    @classmethod
    def is_extractable ( cls , path , magic_number = b"" ) -> bool:
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path , "rb" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , "r" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class _a( __A ):
    """xz (LZMA) single-file extractor.

    Fix: magic list bound to a throwaway name instead of ``magic_numbers``.
    """

    magic_numbers = [B'\xFD\x37\x7A\x58\x5A\x00']

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class _a( __A ):
    """RAR archive extractor (requires the optional ``rarfile`` package).

    Fixes: magic list and archive handle were bound to throwaway names;
    ``exist_ok`` was bound to a path instead of True.
    """

    magic_numbers = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00']  # RAR_ID # RAR5_ID

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile" )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class _a( __A ):
    """Zstandard single-file extractor (requires the optional ``zstandard``
    package).

    Fix: magic list and decompressor bound to throwaway names.
    """

    magic_numbers = [B'\x28\xb5\x2F\xFD']

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard" )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , "rb" ) as ifh, open(output_path , "wb" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class _a( __A ):
    """bzip2 single-file extractor.

    Fix: magic list bound to a throwaway name instead of ``magic_numbers``.
    NOTE(review): relies on the module-level ``import bza`` — presumably the
    stdlib ``bz2`` module under an obfuscated name; confirm.
    """

    magic_numbers = [B'\x42\x5A\x68']

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        with bza.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class _a( __A ):
    """7-Zip archive extractor (requires the optional ``py7zr`` package).

    Fix: the code imported ``pyazr`` while the error message (and the
    ``PY7ZR_AVAILABLE`` flag) name ``py7zr`` — the import now matches.
    Also: magic list bound to a throwaway name; ``exist_ok`` bound to a path.
    """

    magic_numbers = [B'\x37\x7A\xBC\xAF\x27\x1C']

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr" )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , "r" ) as archive:
            archive.extractall(output_path )
class _a( __A ):
    """LZ4-frame single-file extractor (requires the optional ``lz4`` package).

    Fix: the code imported ``lza.frame`` while the error message (and the
    ``LZ4_AVAILABLE`` flag) name ``lz4`` — the import now matches.
    Also: magic list bound to a throwaway name.
    """

    magic_numbers = [B'\x04\x22\x4D\x18']

    @staticmethod
    def extract ( input_path , output_path ) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4" )
        import lz4.frame

        with lz4.frame.open(input_path , "rb" ) as compressed_file:
            with open(output_path , "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class _a:
    """Dispatcher mapping format names to extractor classes.

    Fixes: the registry was bound to a throwaway name while the methods read
    ``cls.extractors``; all five methods shared one name although the call
    sites in this module use distinct names (``_get_magic_number_max_length``,
    ``_read_magic_number``, ``infer_extractor_format``, ``extract``); several
    signatures duplicated parameter names (SyntaxError); deprecation
    warnings had their category bound to an argument instead of
    ``FutureWarning``.
    """

    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length ( cls ) -> Optional[Any]:
        """Longest magic number among all registered magic-number extractors."""
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def _read_magic_number ( path , magic_number_length ) -> Any:
        """Read the file prefix, returning b"" when the file is unreadable."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""

    @classmethod
    def is_extractable ( cls , path , return_extractor = False ) -> bool:
        """Deprecated: use ``infer_extractor_format`` instead."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead." , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format ( cls , path ) -> str:  # <Added version="2.4.0"/>
        """Return the registry key of the first extractor accepting *path*,
        or None when no extractor matches."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format

    @classmethod
    def extract ( cls , input_path , output_path , extractor_format = None , extractor = "deprecated" , ) -> None:
        """Extract *input_path* into *output_path* with the given format,
        serializing concurrent extractions through a lock file."""
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(".lock" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , BaseExtractor ):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead." , category=FutureWarning , )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0." , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
| 278
|
def A ( n ):
    """Return the n-th Fibonacci number per this module's indexing.

    Returns 0 for n == 1 or non-int input (quirk preserved). (Fix: the body
    read ``n`` which was never bound, and the isinstance check compared the
    value against itself instead of ``int``.)
    """
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def A ( n ):
    """Return the index of the first Fibonacci number with *n* digits.

    (Fix: the parameter was bound under a placeholder name while the body
    read ``n``/``digits``/``index``, and the original called a sibling
    ``fibonacci`` that is not defined under that name in this module — the
    helper is inlined here so the function is self-contained.)
    """

    def _fib(k ):
        # Same quirky Fibonacci as the sibling: 0 for k == 1 or non-int.
        if k == 1 or not isinstance(k , int ):
            return 0
        if k == 2:
            return 1
        seq = [0, 1]
        for idx in range(2 , k + 1 ):
            seq.append(seq[idx - 1] + seq[idx - 2] )
        return seq[k]

    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(_fib(index ) ) )
    return index
def A ( UpperCAmelCase = 1_000 ):
    """Project Euler 25-style entry point: first Fibonacci index with the
    given number of digits."""
    # NOTE(review): ``fibonacci_digits_index`` is not defined under that name
    # in this module (the sibling above is also named ``A``) — confirm target.
    return fibonacci_digits_index(UpperCAmelCase )


if __name__ == "__main__":
    # NOTE(review): ``solution`` is likewise not defined under that name here.
    print(solution(int(str(input()).strip())))
| 278
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)

# NOTE(review): every constant in this section is bound to the same name
# ``__A`` (logger, sentencepiece underline, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, max sizes, language codes), so only the last
# assignment survives at module scope — confirm the intended distinct names.
__A = '▁'

# Local file names used by the tokenizer.
__A = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
    'tokenizer_config_file': 'tokenizer_config.json',
}

# Hub URLs for each pretrained checkpoint's files.
__A = {
    'vocab_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
    },
    'spm_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_config_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
    },
}

# Maximum positional-embedding sizes per checkpoint.
__A = {
    'facebook/m2m100_418M': 1024,
}

# fmt: off
# Language-code sets per model family (used to build the __lang__ tokens).
__A = {
    'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
# M2M100 tokenizer (sentencepiece-based) — the remainder of the class
# continues beyond this fragment.
class SCREAMING_SNAKE_CASE ( snake_case ):
    """simple docstring"""

    # NOTE(review): all six class attributes share the name ``A_``, so only
    # the last binding survives — confirm the intended distinct names
    # (vocab_files_names, max_model_input_sizes, pretrained_vocab_files_map,
    # model_input_names, prefix/suffix token lists).
    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = ["input_ids", "attention_mask"]
    A_ = []
    A_ = []

    # NOTE(review): every positional parameter below is named ``__A`` — a
    # SyntaxError as written; the body reads vocab_file, spm_file, src_lang,
    # tgt_lang, the special tokens, language_codes, sp_model_kwargs and
    # num_madeup_words, which is presumably the intended signature. The many
    # ``_A =`` bindings likewise discard values the rest of the class reads.
    def __init__( self: Optional[int] , __A: Tuple , __A: List[str] , __A: Union[str, Any]=None , __A: Optional[int]=None , __A: str="<s>" , __A: Tuple="</s>" , __A: Dict="</s>" , __A: Optional[int]="<pad>" , __A: List[Any]="<unk>" , __A: Optional[Any]="m2m100" , __A: Optional[Dict[str, Any]] = None , __A: Union[str, Any]=8 , **__A: Tuple , ) -> None:
        _A = {} if sp_model_kwargs is None else sp_model_kwargs

        _A = language_codes
        _A = FAIRSEQ_LANGUAGE_CODES[language_codes]
        _A = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
        _A = kwargs.get('''additional_special_tokens''' , [] )
        # Register a __lang__ special token for every supported language code.
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(__A )
            for lang_code in fairseq_language_code
            if self.get_lang_token(__A ) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=__A , tgt_lang=__A , bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , language_codes=__A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__A , **__A , )

        _A = vocab_file
        _A = load_json(__A )
        _A = {v: k for k, v in self.encoder.items()}
        _A = spm_file
        _A = load_spm(__A , self.sp_model_kwargs )

        _A = len(self.encoder )
        _A = {
            self.get_lang_token(__A ): self.encoder_size + i for i, lang_code in enumerate(__A )
        }
        _A = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__A )}
        _A = {v: k for k, v in self.lang_token_to_id.items()}

        _A = src_lang if src_lang is not None else '''en'''
        _A = tgt_lang
        _A = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )

        _A = num_madeup_words

    # NOTE(review): the four methods below all share the name ``__A``, and the
    # ``@src_lang.setter`` decorator references a property that is not bound
    # under that name — confirm the intended names (vocab_size, src_lang,
    # src_lang.setter, _tokenize).
    @property
    def __A ( self: Tuple ) -> int:
        """Total vocabulary size: base encoder entries plus language tokens."""
        return len(self.encoder ) + len(self.lang_token_to_id )

    @property
    def __A ( self: Optional[Any] ) -> str:
        """Current source-language code."""
        return self._src_lang

    @src_lang.setter
    def __A ( self: List[Any] , __A: str ) -> None:
        # Changing the source language also refreshes the special tokens.
        _A = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __A ( self: Tuple , __A: str ) -> List[str]:
        """Tokenize text with the underlying sentencepiece model."""
        return self.sp_model.encode(__A , out_type=__A )
def __A ( self: Dict , __A: Dict ) -> Any:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__A , self.encoder[self.unk_token] )
def __A ( self: List[Any] , __A: int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__A , self.unk_token )
def __A ( self: List[str] , __A: Optional[Any] ) -> Optional[int]:
_A = []
_A = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__A ) + token
_A = []
else:
current_sub_tokens.append(__A )
out_string += self.sp_model.decode(__A )
return out_string.strip()
def __A ( self: Tuple , __A: List[int] , __A: Optional[List[int]] = None , __A: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
_A = [1] * len(self.prefix_tokens )
_A = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def __A ( self: Tuple , __A: List[int] , __A: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self: Tuple ) -> Dict:
_A = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Union[str, Any] ) -> Dict:
_A = self.__dict__.copy()
_A = None
return state
def __setstate__( self: int , __A: Dict ) -> None:
_A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_A = {}
_A = load_spm(self.spm_file , self.sp_model_kwargs )
def __A ( self: Dict , __A: str , __A: Optional[str] = None ) -> Tuple[str]:
_A = Path(__A )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
_A = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_A = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __A )
if os.path.abspath(self.spm_file ) != os.path.abspath(__A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __A )
elif not os.path.isfile(self.spm_file ):
with open(__A , '''wb''' ) as fi:
_A = self.sp_model.serialized_model_proto()
fi.write(__A )
return (str(__A ), str(__A ))
def __A ( self: Tuple , __A: List[str] , __A: str = "en" , __A: Optional[List[str]] = None , __A: str = "ro" , **__A: Optional[int] , ) -> BatchEncoding:
_A = src_lang
_A = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__A , __A , **__A )
def __A ( self: Optional[int] , __A: str , __A: Optional[str] , __A: Optional[str] , **__A: List[Any] ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_A = src_lang
_A = self(__A , add_special_tokens=__A , **__A )
_A = self.get_lang_id(__A )
_A = tgt_lang_id
return inputs
def __A ( self: List[Any] ) -> Tuple:
self.set_src_lang_special_tokens(self.src_lang )
def __A ( self: Any ) -> Tuple:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self: Optional[Any] , __A: str ) -> None:
_A = self.get_lang_token(__A )
_A = self.lang_token_to_id[lang_token]
_A = [self.cur_lang_id]
_A = [self.eos_token_id]
def __A ( self: List[str] , __A: str ) -> None:
_A = self.get_lang_token(__A )
_A = self.lang_token_to_id[lang_token]
_A = [self.cur_lang_id]
_A = [self.eos_token_id]
def __A ( self: Dict , __A: str ) -> str:
return self.lang_code_to_token[lang]
def __A ( self: Dict , __A: str ) -> int:
_A = self.get_lang_token(__A )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> "sentencepiece.SentencePieceProcessor":
    """Load a SentencePiece model from *path*.

    Args:
        path: Filesystem path of the serialized ``.model`` file.
        sp_model_kwargs: Extra keyword arguments forwarded to the
            ``SentencePieceProcessor`` constructor.

    Returns:
        The loaded ``SentencePieceProcessor``.
    """
    # Bug fix: the function was named `__A`, but the tokenizer above calls
    # `load_spm(...)` — the mangled name made that a NameError.
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    """Read a JSON file and return the deserialized object.

    Bug fix: the function was named `__A`, but the tokenizer above calls
    `load_json(...)` — the mangled name made that a NameError.
    """
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize *data* as indented JSON to *path*.

    Bug fix: the function was named `__A`, but the tokenizer above calls
    `save_json(...)` — the mangled name made that a NameError.
    """
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 484
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
# Module-level logger.
logger = logging.get_logger(__name__)

# Hub locations of the pretrained ImageGPT configuration files.
# NOTE(review): the URLs are empty strings in this source — TODO confirm
# against the upstream file before relying on them.
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/imagegpt-small': '',
    'openai/imagegpt-medium': '',
    'openai/imagegpt-large': '',
}
class SCREAMING_SNAKE_CASE ( snake_case ):
    """Configuration for an ImageGPT model.

    Stores the hyper-parameters of the transformer (vocabulary of color
    clusters + 1, number of pixel positions, embedding width, depth, heads,
    etc.) and maps the generic attribute names to the GPT-2-style ones.

    Bug fix: in the original, every ``__init__`` parameter was named ``__A``
    — duplicate argument names are a SyntaxError.  The parameter names below
    are recovered from the attribute assignments in the body.
    """

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,  # None -> framework default (typically 4 * n_embd)
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class SCREAMING_SNAKE_CASE ( snake_case ):
    """ONNX export configuration for ImageGPT.

    Bug fix: both methods were named ``__A``, so the second definition
    clobbered the ``@property`` — the names below follow the OnnxConfig API
    the body clearly implements.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes: batch size and sequence length vary at export time.
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate random images and run them through *preprocessor* to
        produce dummy model inputs for ONNX tracing."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 484
| 1
|
def _UpperCAmelCase ( a : list[int] ):
if not numbers:
return 0
if not isinstance(a , (list, tuple) ) or not all(
isinstance(a , a ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
snake_case__ = snake_case__ = snake_case__ = numbers[0]
for i in range(1 , len(a ) ):
# update the maximum and minimum subarray products
snake_case__ = numbers[i]
if number < 0:
snake_case__ , snake_case__ = min_till_now, max_till_now
snake_case__ = max(a , max_till_now * number )
snake_case__ = min(a , min_till_now * number )
# update the maximum product found till now
snake_case__ = max(a , a )
return max_prod
| 99
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make the test results below reproducible across runs.
enable_full_determinism()
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Model tests for diffusers' VQModel (VQ-VAE style autoencoder).

    NOTE(review): the method names in this block were mangled — every method
    is named ``__magic_name__``, so later definitions shadow earlier ones.
    Restore the original names (dummy_input, input_shape, output_shape,
    prepare_init_args_and_inputs_for_common, the test_* methods — TODO
    confirm) before running.
    """
    # Model class under test and the output attribute checked by the mixin.
    _lowercase : Optional[int] = VQModel
    _lowercase : str = '''sample'''

    @property
    def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple=(3_2, 3_2)):
        '''Random float image batch used as the model's dummy input.'''
        snake_case__ = 4
        snake_case__ = 3
        snake_case__ = floats_tensor((batch_size, num_channels) + sizes).to(UpperCamelCase__)
        return {"sample": image}

    @property
    def __magic_name__ ( self : str):
        '''Expected input shape: 3 channels, 32x32.'''
        return (3, 3_2, 3_2)

    @property
    def __magic_name__ ( self : List[str]):
        '''Expected output shape: same as the input.'''
        return (3, 3_2, 3_2)

    def __magic_name__ ( self : int):
        '''Init kwargs + dummy inputs for the common model tests.'''
        snake_case__ = {
            """block_out_channels""": [3_2, 6_4],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 3,
        }
        snake_case__ = self.dummy_input
        return init_dict, inputs_dict

    def __magic_name__ ( self : Optional[int]):
        '''Intentionally skipped for this model.'''
        pass

    def __magic_name__ ( self : Tuple):
        '''Intentionally skipped for this model.'''
        pass

    def __magic_name__ ( self : str):
        '''from_pretrained must load the dummy checkpoint with no missing keys.'''
        snake_case__ , snake_case__ = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCamelCase__)
        self.assertIsNotNone(UpperCamelCase__)
        self.assertEqual(len(loading_info["""missing_keys"""]) , 0)
        model.to(UpperCamelCase__)
        snake_case__ = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def __magic_name__ ( self : Dict):
        '''Forward pass on a fixed seed must reproduce the reference output slice.'''
        snake_case__ = VQModel.from_pretrained("""fusing/vqgan-dummy""")
        model.to(UpperCamelCase__).eval()
        # Seed both CPU and (if present) CUDA for reproducibility.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        snake_case__ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
        snake_case__ = image.to(UpperCamelCase__)
        with torch.no_grad():
            snake_case__ = model(UpperCamelCase__).sample
        snake_case__ = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        snake_case__ = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43])
        # fmt: on
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3))
| 99
| 1
|
import os
# Precomputes a list of the 100 first triangular numbers
__a = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def a ( ):
'''simple docstring'''
lowercase_ = os.path.dirname(os.path.realpath(snake_case__ ) )
lowercase_ = os.path.join(snake_case__ , '''words.txt''' )
lowercase_ = ''''''
with open(snake_case__ ) as f:
lowercase_ = f.readline()
lowercase_ = [word.strip('''"''' ) for word in words.strip('''\r\n''' ).split(''',''' )]
lowercase_ = [
word
for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(snake_case__ )
if __name__ == "__main__":
print(solution())
| 97
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# Help text shown for the `accelerate config` sub-command.
_lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input():
    """Ask which compute environment to configure and gather its settings.

    Returns the config object produced by the SageMaker or cluster
    questionnaire.

    Bug fix: the function was named ``_snake_case`` (colliding with its two
    siblings below) while the rest of the file calls ``get_user_input()``;
    the result of ``_ask_options`` was also assigned to a throwaway name
    while the branch read the undefined ``compute_environment``.
    """
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Create (or attach to *subparsers*) the parser for `accelerate config`.

    Bug fixes: the parameter was named ``snake_case__`` while the body reads
    ``subparsers``; ``description=`` and ``default=`` pointed at the same
    undefined placeholder (the description is the module constant
    ``_lowercase``; the config-file default is ``None``); ``func=`` now
    points at ``config_command`` below.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=_lowercase)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=_lowercase)
    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive questionnaire and write the resulting config file.

    Bug fixes: renamed from the colliding ``_snake_case``; the directory
    check referenced an undefined placeholder — it guards the imported
    ``cache_dir`` holding ``default_yaml_config_file``.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Ensure the default cache location exists before writing there.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    # Serialize in the format implied by the chosen file extension.
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F'accelerate configuration saved at {config_file}')
def main():
    """Entry point for `accelerate config` when run as a script.

    Bug fix: renamed from the colliding ``_snake_case`` — the
    ``__main__`` guard below calls ``main()``.
    """
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 91
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
    """Tokenizer tests for XLMTokenizer (BPE with </w> word-end markers).

    NOTE(review): every method in this block was mangled to the same name
    ``a``, so later definitions shadow earlier ones — restore the original
    names (setUp, get_input_output_texts, test_full_tokenizer,
    test_sequence_builders — TODO confirm) before running.
    """
    # Tokenizer class under test; rust tokenizer not exercised.
    A_ : Dict = XLMTokenizer
    A_ : Optional[Any] = False

    def a (self : int ):
        """Write a tiny BPE vocab and merges file into the test tmpdir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __snake_case = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        __snake_case = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
        __snake_case = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase__ ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase__ ) )

    def a (self : Optional[int] , a__ : Any ):
        """Input/expected-output pair for the common round-trip test."""
        __snake_case = '''lower newer'''
        __snake_case = '''lower newer'''
        return input_text, output_text

    def a (self : Union[str, Any] ):
        """Tokenization must split into BPE pieces and map to the tiny vocab ids."""
        __snake_case = XLMTokenizer(self.vocab_file , self.merges_file )

        __snake_case = '''lower'''
        __snake_case = ['''low''', '''er</w>''']
        __snake_case = tokenizer.tokenize(UpperCAmelCase__ )
        self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

        __snake_case = tokens + ['''<unk>''']
        __snake_case = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )

    @slow
    def a (self : Any ):
        """Special tokens: single sequence is [0]+ids+[1]; pairs add a second [1]."""
        __snake_case = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )

        __snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
        __snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )

        __snake_case = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        __snake_case = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 721
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    '''kwargs, expected''',
    [
        ({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
        ({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10)]),
        # 10 shards over 10 jobs: one single-shard range per job.
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
        ({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1)]),
        ({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def lowerCamelCase__(kwargs, expected):
    """Each job must receive its expected contiguous range of shard indices.

    Bug fix: both parameters were named ``snake_case_`` (duplicate argument
    SyntaxError) and one parametrized case built ``range(snake_case_, i+1)``
    from that placeholder instead of ``range(i, i+1)``.
    """
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    '''gen_kwargs, max_num_jobs, expected''',
    [
        ({'''foo''': 0}, 10, [{'''foo''': 0}]),
        ({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
        ({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
        ({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
        ({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
    ],
)
def lowerCamelCase__(gen_kwargs, max_num_jobs, expected):
    """Splitting gen_kwargs must partition list values across at most max_num_jobs jobs.

    Bug fix: all three parameters were named ``snake_case_`` (duplicate
    argument SyntaxError); restored to match the parametrize ids.
    """
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    '''gen_kwargs, expected''',
    [
        ({'''foo''': 0}, 1),
        ({'''shards''': [0]}, 1),
        ({'''shards''': [0, 1, 2, 3]}, 4),
        ({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
        ({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
        # Two list-valued kwargs of different lengths are ambiguous -> error.
        ({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
    ],
)
def lowerCamelCase__(gen_kwargs, expected):
    """Shard count is inferred from list-valued kwargs; mismatched lists raise.

    Bug fix: both parameters were named ``snake_case_`` (duplicate argument
    SyntaxError) and ``pytest.raises`` received the placeholder instead of
    the expected exception type.
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 388
| 0
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __magic_name__ :
lowercase : Optional[Union[str, Path]] =None
lowercase : bool =False
lowercase : bool =False
lowercase : bool =False
lowercase : Optional[Dict] =None
lowercase : Optional[str] =None
lowercase : bool =False
lowercase : bool =False
lowercase : bool =False
lowercase : bool =True
lowercase : Optional[int] =None
lowercase : int =1
lowercase : Optional[Union[str, bool]] =None
lowercase : bool =False
lowercase : Optional[Dict] =None
lowercase : Optional[str] =None
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> "DownloadConfig":
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase__ ) for k, v in self.__dict__.items()} )
| 323
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __magic_name__ ( A__, unittest.TestCase ):
    """Tokenizer tests for BertJapaneseTokenizer with word-level backends
    (MeCab, Sudachi, Juman++) and a WordPiece sub-tokenizer.

    NOTE(review): the method names in this block were mangled — every method
    is named ``SCREAMING_SNAKE_CASE_``, so later definitions shadow earlier
    ones; restore the original test_* names before running.
    """
    # Tokenizer class under test; only the slow (Python) tokenizer here.
    lowercase : Dict =BertJapaneseTokenizer
    lowercase : Union[str, Any] =False
    lowercase : List[str] =True

    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
        '''Write a tiny Japanese WordPiece vocabulary into the test tmpdir.'''
        super().setUp()

        UpperCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> str:
        '''Input/expected-output pair for the common round-trip test.'''
        UpperCAmelCase = "こんにちは、世界。 \nこんばんは、世界。"
        UpperCAmelCase = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCamelCase__ : int ) -> List[Any]:
        '''Encode/decode helper producing a clean (text, ids) pair.'''
        UpperCAmelCase , UpperCAmelCase = self.get_input_output_texts(UpperCamelCase__ )
        UpperCAmelCase = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
        UpperCAmelCase = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        return text, ids

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
        '''Intentionally skipped for this tokenizer.'''
        pass  # TODO add if relevant

    def SCREAMING_SNAKE_CASE_ ( self : str ) -> Tuple:
        '''Intentionally skipped for this tokenizer.'''
        pass  # TODO add if relevant

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
        '''Intentionally skipped for this tokenizer.'''
        pass  # TODO add if relevant

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple:
        '''Default word tokenizer + WordPiece must produce the expected pieces/ids.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file )

        UpperCAmelCase = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple:
        '''MeCab-backed tokenizer must tokenize correctly and survive pickling.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(UpperCamelCase__ )

        UpperCAmelCase = "こんにちは、世界。\nこんばんは、世界。"
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

        # Round-trip through pickle: the external MeCab handle must be rebuilt.
        UpperCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )

        with open(UpperCamelCase__ , "rb" ) as handle:
            UpperCAmelCase = pickle.load(UpperCamelCase__ )

        UpperCAmelCase = tokenizer_new.tokenize(UpperCamelCase__ )

        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
        '''MeCab with the ipadic dictionary.'''
        UpperCAmelCase = MecabTokenizer(mecab_dic="ipadic" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
        '''MeCab with unidic_lite, if installed.'''
        try:
            UpperCAmelCase = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]:
        '''MeCab with unidic, if installed.'''
        try:
            UpperCAmelCase = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Union[str, Any]:
        '''MeCab with lower-casing enabled.'''
        UpperCAmelCase = MecabTokenizer(do_lower_case=UpperCamelCase__ , mecab_dic="ipadic" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
        '''MeCab with an explicit jumandic option, skipped when the dict is absent.'''
        try:
            UpperCAmelCase = MecabTokenizer(
                do_lower_case=UpperCamelCase__ , normalize_text=UpperCamelCase__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
        '''MeCab with text normalization disabled (keeps the ideographic space).'''
        UpperCAmelCase = MecabTokenizer(normalize_text=UpperCamelCase__ , mecab_dic="ipadic" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
        '''Sudachi-backed tokenizer must tokenize correctly and survive pickling.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(UpperCamelCase__ )

        UpperCAmelCase = "こんにちは、世界。\nこんばんは、世界。"
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

        UpperCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )

        with open(UpperCamelCase__ , "rb" ) as handle:
            UpperCAmelCase = pickle.load(UpperCamelCase__ )

        UpperCAmelCase = tokenizer_new.tokenize(UpperCamelCase__ )

        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple:
        '''Sudachi core dictionary keeps whitespace tokens by default.'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
        '''Sudachi split mode A: shortest units.'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )

        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Tuple:
        '''Sudachi split mode B: middle units.'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )

        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Tuple:
        '''Sudachi split mode C: longest units.'''
        UpperCAmelCase = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )

        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
        '''Sudachi with lower-casing enabled.'''
        UpperCAmelCase = SudachiTokenizer(do_lower_case=UpperCamelCase__ , sudachi_dict_type="core" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Union[str, Any]:
        '''Sudachi with text normalization disabled (keeps the ideographic space).'''
        UpperCAmelCase = SudachiTokenizer(normalize_text=UpperCamelCase__ , sudachi_dict_type="core" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    @require_sudachi
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]:
        '''Sudachi with whitespace trimming enabled.'''
        UpperCAmelCase = SudachiTokenizer(trim_whitespace=UpperCamelCase__ , sudachi_dict_type="core" )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
        '''Juman++-backed tokenizer must tokenize correctly and survive pickling.'''
        UpperCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(UpperCamelCase__ )

        UpperCAmelCase = "こんにちは、世界。\nこんばんは、世界。"
        UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

        UpperCAmelCase = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(UpperCamelCase__ , "wb" ) as handle:
            pickle.dump(UpperCamelCase__ , UpperCamelCase__ )

        with open(UpperCamelCase__ , "rb" ) as handle:
            UpperCAmelCase = pickle.load(UpperCamelCase__ )

        UpperCAmelCase = tokenizer_new.tokenize(UpperCamelCase__ )

        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[Any]:
        '''Juman++ default tokenization (keeps ideographic spaces).'''
        UpperCAmelCase = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
        '''Juman++ with lower-casing enabled.'''
        UpperCAmelCase = JumanppTokenizer(do_lower_case=UpperCamelCase__ )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
        '''Juman++ with normalization disabled (half-width katakana stays split).'''
        UpperCAmelCase = JumanppTokenizer(normalize_text=UpperCamelCase__ )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
        '''Juman++ with whitespace trimming enabled.'''
        UpperCAmelCase = JumanppTokenizer(trim_whitespace=UpperCamelCase__ )

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    @require_jumanpp
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[str]:
        '''Juman++ keeps emoticon-like runs as single tokens.'''
        UpperCAmelCase = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Any:
        '''WordPiece sub-tokenizer: greedy longest-match with ## continuations.'''
        UpperCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        UpperCAmelCase = {}
        for i, token in enumerate(UpperCamelCase__ ):
            UpperCAmelCase = i
        UpperCAmelCase = WordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]" )

        self.assertListEqual(tokenizer.tokenize("" ) , [] )

        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )

        self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
        '''SentencePiece sub-word tokenizer via the auto-jumanpp checkpoint.'''
        UpperCAmelCase = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        UpperCAmelCase = tokenizer.subword_tokenizer

        UpperCAmelCase = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(UpperCamelCase__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )

        UpperCAmelCase = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(UpperCamelCase__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> int:
        '''Special tokens: single sequence is [CLS]+ids+[SEP]; pairs add a second [SEP].'''
        UpperCAmelCase = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )

        UpperCAmelCase = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCamelCase__ )
        UpperCAmelCase = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCamelCase__ )

        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
        UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __magic_name__(A__, unittest.TestCase):
    """Character-level tests for ``BertJapaneseTokenizer``.

    NOTE(review): attribute, method and local names were restored from an
    obfuscated original. ``tokenizer_class`` is grounded by the
    ``self.tokenizer_class`` reads below; ``setUp`` by ``super().setUp()``;
    ``get_tokenizer``/``get_input_output_texts`` follow the tester-mixin
    convention — confirm against the mixin ``A__``.
    """

    tokenizer_class = BertJapaneseTokenizer
    # presumably the base mixin's rust-tokenizer switch — TODO confirm
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny character vocab file into the temporary directory."""
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Load a character-level tokenizer from the temp vocab."""
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return (raw input, expected space-joined character output)."""
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        """Characters map to the ids defined by the vocab file written in setUp."""
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])

    def test_character_tokenizer(self):
        """CharacterTokenizer splits to single characters; OOV chars become [UNK]."""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        """build_inputs_with_special_tokens adds [CLS]/[SEP] around ids."""
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __magic_name__(unittest.TestCase):
    """AutoTokenizer resolves the Japanese BERT checkpoint to BertJapaneseTokenizer."""

    def SCREAMING_SNAKE_CASE_(self):
        # Fix: both assertIsInstance arguments were an undefined name;
        # the loaded tokenizer and its expected concrete class are restored.
        checkpoint = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class __magic_name__(unittest.TestCase):
    """Loading a checkpoint through a mismatched tokenizer class logs a warning."""

    def SCREAMING_SNAKE_CASE_(self):
        # Fix: `from_pretrained` was called with an undefined name instead of
        # the checkpoint string bound just above it.
        checkpoint = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            # Japanese checkpoint loaded through the generic BertTokenizer.
            BertTokenizer.from_pretrained(checkpoint)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."))
        checkpoint = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            # Cased English checkpoint loaded through BertJapaneseTokenizer.
            BertJapaneseTokenizer.from_pretrained(checkpoint)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."))
| 323
| 1
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def snake_case_(precision: int) -> str:
    """Return pi to `precision` significant digits via the Chudnovsky algorithm.

    Each series term contributes roughly 14 digits, hence ceil(precision / 14)
    iterations. The final character is dropped to discard the rounded digit.

    Fixes: the obfuscated original lost `getcontext().prec = precision` (so the
    Decimal context precision was never set), computed `factorial(precision)`
    instead of `factorial(k)` inside the series, and referenced `precision`
    while the parameter had a different name.

    :param precision: number of significant digits (natural number).
    :raises TypeError: if `precision` is not an int.
    :raises ValueError: if `precision` < 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # Fix: the print line referenced undefined names `n` and `pi`; bind the
    # digit count to `n` and call the pi function defined above.
    n = 50
    print(f"The first {n} digits of pi is: {snake_case_(n)}")
| 298
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__a : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase(BaseImageProcessor):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale to [0, 1], and mean/std normalization.

    Fixes vs. the obfuscated original: method definitions had duplicate
    parameter names (a SyntaxError) and all five methods shared the name
    ``__a`` so they shadowed each other, while ``preprocess`` calls
    ``self.resize`` / ``self.center_crop`` / ``self.rescale`` /
    ``self.normalize`` — those names are restored.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # `size` drives the shortest-edge resize; `crop_size` the square crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize so the image's shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # module-level `resize` from image_transforms, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the configured transform chain; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 298
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast, tiny-model tests for `StableDiffusionInstructPixaPixPipeline`.

    NOTE(review): class-attribute, method and local names were reconstructed
    from an obfuscated original. `pipeline_class`/`params`/`batch_params`/
    `image_params` are the names the tester mixins read, and
    `self.image_latents_params`, `self.get_dummy_components()` and
    `self.get_dummy_inputs()` are referenced by the bodies below.
    """

    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny, seeded pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,  # instruct-pix2pix concatenates image latents -> 8 input channels
            out_channels=4,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""),
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt + tiny RGB image inputs."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("""RGB""")
        if str(device).startswith("""mps"""):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = """french fries"""
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["""prompt"""] = [inputs["""prompt"""]] * 2
        # Turn the single PIL image into a duplicated float tensor batch.
        image = np.array(inputs["""image"""]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["""image"""] = image.repeat(2, 1, 1, 1)
        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="""scaled_linear""")
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x) for x in rounded_slice]))
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        # Name must match the mixin's test for the override to take effect.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must match passing the raw image."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="""pt"""))[0]
        vae = components["""vae"""]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="""pt""")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()
        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, """passing latents as image input generate different result from passing image""")
@slow
@require_torch_gpu
class _A(unittest.TestCase):
    """Slow integration tests against the real `timbrooks/instruct-pix2pix` weights.

    NOTE(review): method and local names were reconstructed from an obfuscated
    original; `tearDown` is grounded by `super().tearDown()` and `get_inputs`
    by the `self.get_inputs()` calls below.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Common real-image inputs, seeded for determinism."""
        generator = torch.manual_seed(seed)
        image = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""")
        inputs = {
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step, timestep, latents):
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["""image"""] = inputs["""image"""].resize((504, 504))
        model_id = """timbrooks/instruct-pix2pix"""
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 68
|
def a__(profit, weight, max_weight):
    """Fractional (greedy) knapsack: maximise profit for a capacity of
    `max_weight`, allowing a fractional take of the last item.

    Fix: the obfuscated original assigned -1 to a throwaway local instead of
    `profit_by_weight[index] = -1`, so used items were never marked and
    `list.index` kept returning the same element.

    :param profit: profits per item (non-negative numbers).
    :param weight: weights per item, same length as `profit`.
    :param max_weight: positive capacity.
    :return: maximum achievable gain (int or float).
    :raises ValueError: on mismatched lengths, non-positive capacity, or
        negative profits/weights.
    """
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Profit gained per 1 unit of weight, for each item.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Sorted copy; the original list keeps the item -> ratio positions.
    sorted_profit_by_weight = sorted(profit_by_weight)
    length = len(sorted_profit_by_weight)
    limit = 0  # weight taken so far
    gain = 0  # profit accumulated so far
    i = 0
    # Greedily take items in descending profit/weight order until full.
    while limit <= max_weight and i < length:
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so .index() skips it
        if max_weight - limit >= weight[index]:
            # Item fits entirely.
            limit += weight[index]
            gain += 1 * profit[index]
        else:
            # Take only the fraction that still fits, then stop.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    # Fix: the final call used the undefined name `calc_profit` and the three
    # inputs were clobbered into one module variable; restore the names the
    # call expects and invoke the function defined above (`a__`).
    print(
        """Input profits, weights, and then max_weight (all positive ints) separated by """
        """spaces."""
    )
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
    # Function Call
    a__(profit, weight, max_weight)
| 54
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class a(unittest.TestCase):
    """Integration check: Flax XLM-R base reproduces known hidden-state values.

    Fix: the obfuscated original referenced an undefined `_lowercase` for the
    encode input, the model input and both assertion operands; locals restored.
    """

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''')
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['''last_hidden_state''']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
| 703
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
# Fix: both constants were clobbered into one name; `TOKENIZER_CHECKPOINTS`
# is read by the test setUp below, `TINY_MODEL_CHECKPOINT` by the tiny model
# builder (name per upstream convention — TODO confirm against full file).
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
# Backward-compat: any remaining obfuscated reads of the old name still
# resolve to the tiny checkpoint, as they did before this fix.
__UpperCAmelCase = TINY_MODEL_CHECKPOINT
if is_tf_available():
class a ( tf.keras.Model ):
"""simple docstring"""
def __init__( self : List[str] , snake_case : List[str] ) -> str:
super().__init__()
__UpperCAmelCase : List[str] = tokenizer
__UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(snake_case )
__UpperCAmelCase : int = TFAutoModel.from_config(snake_case )
def lowerCamelCase__ ( self : List[Any] , snake_case : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = self.tokenizer(snake_case )
__UpperCAmelCase : Optional[Any] = self.bert(**snake_case )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class a ( unittest.TestCase ):
    """Checks that the in-graph TFBertTokenizer produces the same encodings as
    the slow Python BertTokenizer, and that it survives tf.function compilation
    and SavedModel round-trips.

    NOTE(review): all four test methods share the obfuscated name
    `lowerCamelCase__` (only the last definition survives at runtime), and the
    setUp body binds results to throwaway `__UpperCAmelCase` locals while later
    code reads self.tokenizers / self.tf_tokenizers / self.test_sentences /
    self.paired_sentences — confirm the original attribute names upstream."""
    def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
        super().setUp()
        # Python (slow) tokenizers, duplicated so both fast and non-fast TF
        # tokenizers below have a reference counterpart.
        __UpperCAmelCase : Tuple = [
            BertTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ] # repeat for when fast_bert_tokenizer=false
        __UpperCAmelCase : Any = [TFBertTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(snake_case , use_fast_bert_tokenizer=snake_case )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        # Sentences chosen to stress unusual characters and encodings.
        __UpperCAmelCase : Optional[int] = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        __UpperCAmelCase : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
        # TF tokenizer output must match the Python tokenizer, shape and values.
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                __UpperCAmelCase : Any = tokenizer(snake_case , return_tensors='''tf''' , padding='''longest''' )
                __UpperCAmelCase : Optional[int] = tf_tokenizer(snake_case )
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
    @slow
    def lowerCamelCase__ ( self : List[Any] ) -> str:
        # Passing pairs as one merged call vs. text/text_pair must agree.
        for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase : Any = tf_tokenizer(self.paired_sentences )
            __UpperCAmelCase : Union[str, Any] = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
    @slow
    def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
        # Eager and tf.function-compiled tokenization must agree.
        for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase : Optional[int] = tf.function(snake_case )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                __UpperCAmelCase : int = tf.constant(snake_case )
                __UpperCAmelCase : Tuple = compiled_tokenizer(snake_case )
                __UpperCAmelCase : Optional[int] = tf_tokenizer(snake_case )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
    @slow
    def lowerCamelCase__ ( self : str ) -> str:
        # SavedModel round-trip: outputs must match up to a small epsilon.
        # NOTE(review): `ModelToSave` is not defined under that name in this
        # chunk (the wrapper class above is obfuscated to `a`); confirm upstream.
        for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase : List[Any] = ModelToSave(tokenizer=snake_case )
            __UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(self.test_sentences )
            __UpperCAmelCase : Tuple = model(snake_case ) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __UpperCAmelCase : Any = Path(snake_case ) / '''saved.model'''
                model.save(snake_case )
                __UpperCAmelCase : str = tf.keras.models.load_model(snake_case )
                __UpperCAmelCase : Optional[int] = loaded_model(snake_case )
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 266
| 0
|
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
A : int = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def _lowerCAmelCase ( _lowerCAmelCase ) -> Dict:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
A : Tuple = parser.parse_args()
if args.check_lib:
A : List[Any] = importlib.import_module('transformers')
A : int = Path(transformers_module.__file__).parent
else:
A : Dict = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 371
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class UpperCamelCase( unittest.TestCase ):
    """Holds the hyper-parameters used by the PoolFormer image-processor tests
    and builds the kwargs dict handed to the image processor under test.

    Fixes over the previous version: the constructor had every parameter named
    identically (a SyntaxError) and bound all values to throwaway locals
    instead of instance attributes, so the dict builder below could never read
    them back.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Resolve the historical defaults when the caller passes None
        # (None defaults also avoid shared mutable default arguments).
        self.size = size if size is not None else {"shortest_edge": 30}
        self.crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.crop_pct = crop_pct
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def SCREAMING_SNAKE_CASE_(self):
        """Return the image-processor constructor kwargs as a dict."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class UpperCamelCase( _a , unittest.TestCase ):
    # Tests for the PoolFormer image processor: property presence, dict
    # round-trips, and PIL / numpy / torch input handling.
    # NOTE(review): the first base class is obfuscated to `_a` (presumably the
    # ImageProcessingSavingTestMixin imported above) and several helpers bind
    # to throwaway `__snake_case` locals while later lines read
    # `image_processing` / `image_inputs` / `encoded_images` — confirm the
    # original names upstream.
    snake_case_ : List[str] = PoolFormerImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
        # Build the shared hyper-parameter tester.
        # NOTE(review): `PoolFormerImageProcessingTester` is not defined under
        # that name in this chunk (the tester class is obfuscated).
        __snake_case = PoolFormerImageProcessingTester(self )
    @property
    def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
        # Kwargs dict for constructing the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
        # All configuration attributes must exist on a freshly built processor.
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize_and_center_crop" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "crop_pct" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
        # from_dict must honor both the stored defaults and explicit overrides.
        __snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 3_0} )
        self.assertEqual(image_processor.crop_size , {"height": 3_0, "width": 3_0} )
        __snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
        self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
        # Intentionally skipped in this suite.
        pass
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
        # PIL inputs: single image and batched encodings have the right shape.
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
        # Test not batched input
        __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[int]:
        # numpy inputs: same shape checks as the PIL case.
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
        # Test not batched input
        __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]:
        # torch tensor inputs: same shape checks as the PIL case.
        __snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
        # Test not batched input
        __snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        __snake_case = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 371
| 1
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 714
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase =logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class _lowerCamelCase ( UpperCamelCase_ ):
    """Video-classification pipeline: decodes a video with decord, samples
    frames, runs them through the model, and returns top-k labels.

    NOTE(review): identifiers are obfuscated — parameter lists reuse one name
    (a SyntaxError as written), and results are bound to throwaway
    `UpperCamelCase__` locals while later lines read names such as
    `preprocess_params`, `videoreader`, `model_inputs`, `probs`; confirm the
    original bindings upstream.
    """
    def __init__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Dict:
        # decord is required to decode videos; restrict to supported models.
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        requires_backends(self , '''decord''' )
        self.check_model_type(__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
        # Split user kwargs into (preprocess, forward, postprocess) dicts.
        UpperCamelCase__ : List[str] = {}
        if frame_sampling_rate is not None:
            UpperCamelCase__ : Tuple = frame_sampling_rate
        if num_frames is not None:
            UpperCamelCase__ : str = num_frames
        UpperCamelCase__ : List[str] = {}
        if top_k is not None:
            UpperCamelCase__ : List[Any] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Tuple:
        # Delegate to the base Pipeline call machinery.
        return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1 ) -> Optional[Any]:
        # Preprocess: fetch remote videos, sample `num_frames` evenly spaced
        # frames at `frame_sampling_rate`, and run the image processor.
        if num_frames is None:
            UpperCamelCase__ : Optional[Any] = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            UpperCamelCase__ : str = BytesIO(requests.get(__SCREAMING_SNAKE_CASE ).content )
        UpperCamelCase__ : Tuple = VideoReader(__SCREAMING_SNAKE_CASE )
        videoreader.seek(0 )
        UpperCamelCase__ : Optional[int] = 0
        UpperCamelCase__ : str = num_frames * frame_sampling_rate - 1
        UpperCamelCase__ : List[Any] = np.linspace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num=__SCREAMING_SNAKE_CASE , dtype=np.intaa )
        UpperCamelCase__ : str = videoreader.get_batch(__SCREAMING_SNAKE_CASE ).asnumpy()
        UpperCamelCase__ : Tuple = list(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
        return model_inputs
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        # Forward: run the model on the preprocessed inputs.
        UpperCamelCase__ : Tuple = self.model(**__SCREAMING_SNAKE_CASE )
        return model_outputs
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 ) -> str:
        # Postprocess: softmax over logits, take top-k, map ids to labels.
        if top_k > self.model.config.num_labels:
            UpperCamelCase__ : Dict = self.model.config.num_labels
        if self.framework == "pt":
            UpperCamelCase__ : Any = model_outputs.logits.softmax(-1 )[0]
            UpperCamelCase__ ,UpperCamelCase__ : List[str] = probs.topk(__SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        UpperCamelCase__ : Any = scores.tolist()
        UpperCamelCase__ : Any = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )]
| 462
| 0
|
'''simple docstring'''
from typing import Any
def _lowerCAmelCase ( lowercase : Optional[Any] ) ->list[Any]:
"""simple docstring"""
if not input_list:
return []
lowercase__ = [input_list.count(_lowerCAmelCase ) for value in input_list]
lowercase__ = max(_lowerCAmelCase ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(_lowerCAmelCase ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : str = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : List[str] = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def A_() -> Dict[int, str]:
    """Return a mapping from every byte value (0-255) to a printable unicode
    character, for reversible byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are assigned
    characters starting at code point 256 so no byte maps to whitespace or a
    control character.  Fixes over the previous version: the body read the
    undefined names ``bs``, ``cs`` and ``n`` and appended the wrong variable,
    and the return annotation claimed a list while a dict is returned.
    """
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Unassigned byte: give it the next code point above 255.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    chars = [chr(c) for c in cs]
    return dict(zip(bs, chars))
def A_(word) -> Optional[Any]:
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (e.g. a tuple of single-character
    strings); the result contains one ``(prev, next)`` tuple per adjacent
    pair.  Fixes over the previous version: the parameter was never used and
    the body read the undefined names ``word``, ``pairs`` and ``prev_char``.
    NOTE(review): this redefines the name ``A_`` already used by the
    byte-mapping helper above — confirm the original distinct names upstream.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class A__ ( __snake_case ):
    # Byte-level BPE tokenizer for LED (vocab.json + merges.txt), with a _pad
    # override that also pads `global_attention_mask`.
    # NOTE(review): identifiers are obfuscated — the base class `__snake_case`
    # is presumably PreTrainedTokenizer, all four class attributes share one
    # name, __init__'s parameters are all named `A_` (a SyntaxError as
    # written), and assignments bind throwaway `UpperCamelCase` locals while
    # later lines read names like self.encoder / self.bpe_ranks / word /
    # new_word.  Confirm the original bindings upstream before relying on any
    # of the claims below.
    _UpperCAmelCase :Dict = VOCAB_FILES_NAMES
    _UpperCAmelCase :List[Any] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase :Union[str, Any] = ['input_ids', 'attention_mask']
    def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ):
        # Wrap special tokens as AddedToken, load the vocab and merges files,
        # build encoder/decoder and byte <-> unicode tables, and compile the
        # GPT-2-style pre-tokenization regex.
        UpperCamelCase : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
        UpperCamelCase : Tuple = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
        UpperCamelCase : Optional[Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
        UpperCamelCase : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
        UpperCamelCase : Dict = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
        UpperCamelCase : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase : Optional[Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
        super().__init__(
            errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
        with open(A_ , encoding="utf-8" ) as vocab_handle:
            UpperCamelCase : int = json.load(A_ )
        UpperCamelCase : Dict = {v: k for k, v in self.encoder.items()}
        UpperCamelCase : List[str] = errors # how to handle errors in decoding
        UpperCamelCase : Optional[Any] = bytes_to_unicode()
        UpperCamelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
        with open(A_ , encoding="utf-8" ) as merges_handle:
            UpperCamelCase : str = merges_handle.read().split("\n" )[1:-1]
        UpperCamelCase : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCamelCase : Any = dict(zip(A_ , range(len(A_ ) ) ) )
        UpperCamelCase : List[str] = {}
        UpperCamelCase : Any = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCamelCase : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def __UpperCamelCase( self ):
        # Size of the base vocabulary (without added tokens).
        return len(self.encoder )
    def __UpperCamelCase( self ):
        # Full vocab: base encoder merged with added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )
    def __UpperCamelCase( self , A_ ):
        # Apply BPE to one pre-token, caching results; repeatedly merges the
        # lowest-ranked adjacent pair until no known merge remains.
        if token in self.cache:
            return self.cache[token]
        UpperCamelCase : Union[str, Any] = tuple(A_ )
        UpperCamelCase : str = get_pairs(A_ )
        if not pairs:
            return token
        while True:
            UpperCamelCase : Any = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCamelCase , UpperCamelCase : Union[str, Any] = bigram
            UpperCamelCase : List[Any] = []
            UpperCamelCase : Union[str, Any] = 0
            while i < len(A_ ):
                try:
                    UpperCamelCase : Union[str, Any] = word.index(A_ , A_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCamelCase : Union[str, Any] = j
                if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCamelCase : Tuple = tuple(A_ )
            UpperCamelCase : Union[str, Any] = new_word
            if len(A_ ) == 1:
                break
            else:
                UpperCamelCase : int = get_pairs(A_ )
        UpperCamelCase : Union[str, Any] = " ".join(A_ )
        UpperCamelCase : Any = word
        return word
    def __UpperCamelCase( self , A_ ):
        # Pre-tokenize with the regex, byte-encode, and BPE each piece.
        UpperCamelCase : Union[str, Any] = []
        for token in re.findall(self.pat , A_ ):
            UpperCamelCase : Optional[Any] = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(" " ) )
        return bpe_tokens
    def __UpperCamelCase( self , A_ ):
        # Token string -> id, falling back to the unk token's id.
        return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
    def __UpperCamelCase( self , A_ ):
        # Id -> token string.
        return self.decoder.get(A_ )
    def __UpperCamelCase( self , A_ ):
        # Join tokens and invert the byte-level encoding back to text.
        UpperCamelCase : List[Any] = "".join(A_ )
        UpperCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def __UpperCamelCase( self , A_ , A_ = None ):
        # Write vocab.json and merges.txt into the given directory.
        if not os.path.isdir(A_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCamelCase : List[str] = os.path.join(
            A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase : Any = os.path.join(
            A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(A_ , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + "\n" )
        UpperCamelCase : str = 0
        with open(A_ , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    UpperCamelCase : Any = token_index
                writer.write(" ".join(A_ ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def __UpperCamelCase( self , A_ , A_ = None ):
        # <s> A </s> for one sequence; <s> A </s></s> B </s> for a pair.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase : List[str] = [self.cls_token_id]
        UpperCamelCase : Optional[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def __UpperCamelCase( self , A_ , A_ = None , A_ = False ):
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
        if token_ids_a is None:
            return [1] + ([0] * len(A_ )) + [1]
        return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
    def __UpperCamelCase( self , A_ , A_ = None ):
        # LED (like BART) uses all-zero token type ids.
        UpperCamelCase : Any = [self.sep_token_id]
        UpperCamelCase : Any = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def __UpperCamelCase( self , A_ , A_=False , **A_ ):
        # Optionally prefix a space so leading words BPE like mid-sentence ones.
        UpperCamelCase : Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
            UpperCamelCase : List[str] = " " + text
        return (text, kwargs)
    def __UpperCamelCase( self , A_ , A_ = None , A_ = PaddingStrategy.DO_NOT_PAD , A_ = None , A_ = None , ):
        # Pad as usual, then pad `global_attention_mask` with -1 to match.
        UpperCamelCase : Optional[Any] = super()._pad(
            encoded_inputs=A_ , max_length=A_ , padding_strategy=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCamelCase : Union[str, Any] = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCamelCase : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCamelCase : str = len(encoded_inputs["global_attention_mask"] ) != len(A_ )
            if needs_to_be_padded:
                UpperCamelCase : Optional[int] = len(A_ ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCamelCase : Union[str, Any] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCamelCase : Tuple = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 629
| 0
|
def A__(weights, values, number_of_items, max_weight, index):
    """Solve the 0/1 knapsack problem by plain recursion.

    Args:
        weights: item weights.
        values: item values, parallel to ``weights``.
        number_of_items: how many items there are (``len(weights)``).
        max_weight: remaining capacity.
        index: index of the item currently being decided.

    Returns:
        The best achievable total value using items ``index..`` within
        ``max_weight``.

    Fixes over the previous version: all five parameters shared one name
    (a SyntaxError) and the recursion called an undefined ``knapsack``.

    >>> A__([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    # Base case: every item has been decided.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = A__(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    # Option 2: take the current item, if it still fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + A__(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 15
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__A )
return parser.parse_args()
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : List[str] = parse_args()
# Import training_script as a module.
_lowerCamelCase : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowerCamelCase : Optional[Any] = script_fpath.stem
_lowerCamelCase : Dict = importlib.import_module(__A )
# Patch sys.argv
_lowerCamelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 15
| 1
|
def SCREAMING_SNAKE_CASE__(a: str, b: str) -> bool:
    """Return True iff string *a* can be abbreviated to string *b* by deleting
    some lowercase letters of *a* and upper-casing the rest.

    ``dp[i][j]`` means: the first ``i`` characters of *a* can be transformed
    into the first ``j`` characters of *b*.

    Fixes over the previous version: both parameters shared one name
    (a SyntaxError), every assignment rebound the function name itself, and
    the base case / table updates were lost.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    # Empty prefix of a matches empty prefix of b.
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Upper-case a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 6
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = "Hello world! cécé herlolip"
_snake_case = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def snake_case ( _a: Union[str, Any] , _a: Dict )-> List[str]:
    """Convert an official BertAbs checkpoint into this repo's BertAbsSummarizer,
    verify the two models produce (near-)identical outputs, and save the state
    dict.

    NOTE(review): identifiers are obfuscated — the two parameters share one
    name (a SyntaxError as written; presumably checkpoint path and dump path),
    and results bind throwaway ``lowerCamelCase__`` locals while later lines
    read ``original`` / ``new_model`` / ``tokenizer`` / ``encoder_input_ids``
    etc.  Confirm the original bindings upstream.
    """
    # Build the authors' config and load both the reference and new models.
    lowerCamelCase__ = BertAbsConfig(
        temp_dir='.' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    lowerCamelCase__ = torch.load(_a , lambda _a , _a : storage )
    lowerCamelCase__ = AbsSummarizer(_a , torch.device('cpu' ) , _a )
    original.eval()
    lowerCamelCase__ = BertAbsSummarizer(_a , torch.device('cpu' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('convert the model' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info('Make sure that the models\' outputs are identical' )
    lowerCamelCase__ = BertTokenizer.from_pretrained('bert-base-uncased' )
    # prepare the model inputs
    lowerCamelCase__ = tokenizer.encode('This is sample éàalj\'-.' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a )) )
    lowerCamelCase__ = torch.tensor(_a ).unsqueeze(0 )
    lowerCamelCase__ = tokenizer.encode('This is sample 3 éàalj\'-.' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a )) )
    lowerCamelCase__ = torch.tensor(_a ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    lowerCamelCase__ = encoder_input_ids
    lowerCamelCase__ = decoder_input_ids
    lowerCamelCase__ = lowerCamelCase__ = None
    lowerCamelCase__ = None
    lowerCamelCase__ = lowerCamelCase__ = None
    lowerCamelCase__ = lowerCamelCase__ = None
    lowerCamelCase__ = None
    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    lowerCamelCase__ = original(_a , _a , _a , _a , _a , _a , _a )[0]
    lowerCamelCase__ = original.generator(_a )
    lowerCamelCase__ = new_model(
        _a , _a , _a , _a , _a )[0]
    lowerCamelCase__ = new_model.generator(_a )
    lowerCamelCase__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference beween weights: {:.2f}'.format(_a ) )
    lowerCamelCase__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference beween weights: {:.2f}'.format(_a ) )
    lowerCamelCase__ = torch.allclose(_a , _a , atol=1E-3 )
    if are_identical:
        logging.info('all weights are equal up to 1e-3' )
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('saving the model\'s state dictionary' )
    torch.save(
        new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )


if __name__ == "__main__":
    # NOTE(review): the guard calls `convert_bertabs_checkpoints`, which is not
    # defined in this chunk — presumably the original name of the function
    # above (obfuscated to `snake_case`); confirm upstream.
    _snake_case = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    _snake_case = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 510
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for constraints applied during constrained beam-search generation.

    A concrete subclass must implement ``advance``, ``does_advance``, ``update``,
    ``reset``, ``remaining`` and ``copy``. ``__init__`` immediately runs :meth:`test`
    so a malformed subclass fails fast at construction time.

    Restored from the obfuscated original: the class was named ``__a`` (shadowed by
    later classes) and every method was named ``UpperCAmelCase__`` (each definition
    overwriting the previous), while call sites elsewhere in this file use the names
    below (``self.test()``, ``constraint.advance()``, ``constraint.copy(stateful=...)``).
    """

    def __init__(self):
        # Sanity-check the subclass by driving it to completion once.
        self.test()

    def test(self):
        """Drive the constraint to completion using its own proposals, verifying consistency."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token id (or list of ids) that would advance this constraint one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id):
        """Return whether generating ``token_id`` makes progress on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Forget all progress made so far on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return how many more steps are needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; when ``stateful`` also copy the fulfilment progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact ordered sequence of token ids is generated.

    Args:
        token_ids: non-empty list of non-negative token ids forming the phrase.

    Class name restored from the ``PhrasalConstraint(self.token_ids)`` call site in
    ``copy`` below; method names restored to match the ``Constraint`` interface.
    """

    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Return the next expected token id, or ``None`` once the phrase is complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        """Return a fresh constraint over the same phrase; copy progress if ``stateful``."""
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """A trie over several candidate token-id phrases, used by ``DisjunctiveConstraint``.

    Args:
        nested_token_ids: list of token-id lists; each inner list is one acceptable phrase.
        no_subsets: when True, raise if any phrase is a complete subset (prefix) of another.

    Class and method names restored from the in-file call sites
    (``DisjunctiveTrie(...)``, ``self.trie.next_tokens``, ``self.trie.reached_leaf``).
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for token_id in token_ids:
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        """Return the token ids that may extend ``current_seq`` inside the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True when ``current_seq`` ends on a leaf, i.e. completes one of the phrases."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Recursively count leaf nodes under ``root``."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True when the leaf count differs from the phrase count, i.e. some phrase subsumes another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled once ANY one of several token-id phrases has been generated.

    Args:
        nested_token_ids: non-empty list of non-empty lists of non-negative token ids.

    Class name restored from the ``DisjunctiveConstraint(self.token_ids)`` call site
    in ``copy``; method names restored to match the ``Constraint`` interface.
    """

    def __init__(self, nested_token_ids):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """Return the list of token ids that would advance any candidate phrase, or None."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        """Return a fresh constraint over the same phrases; copy progress if ``stateful``."""
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks the fulfilment of a list of ``Constraint`` objects for one beam hypothesis.

    At most one constraint is "in progress" at a time; the rest are either completed
    or pending. Class name restored from the ``ConstraintListState(self.constraints)``
    call site in ``copy``; method names restored from their in-file usage.
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to the initial state: nothing complete, nothing in progress."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score the amount of fulfilment: completed constraints weigh ``max_seqlen`` each."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return the token ids that would make progress on some constraint, or ``None``."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Re-derive the whole state from an already-generated prefix ``token_ids``."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one generated token; return ``(complete, stepped)`` for that token."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        """Return a copy; by default (``stateful=True``) progress is carried over."""
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 720
|
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Credentials and endpoints of the Hugging Face Hub CI instance used by the fixtures
# below. Bug fix: all seven constants were bound to the single name `a` (each
# assignment shadowing the previous one) while the fixtures reference the names
# restored here (e.g. `CI_HUB_ENDPOINT`, `CI_HUB_USER`, `CI_HUB_USER_TOKEN`).
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): the last path component read "(unknown)" in the corrupted source;
# "{filename}" is the huggingface_hub URL-template placeholder — confirm.
CI_HFH_HUGGINGFACE_CO_URL = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    """Point huggingface_hub's download URL template at the CI Hub endpoint."""
    # Bug fix: the parameter was mangled while the body reads `monkeypatch`.
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL
    )
@pytest.fixture
def ci_hub_config(monkeypatch):
    """Point the `datasets` library config at the CI Hub endpoint."""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    """Redirect HfFolder's token file to the CI-specific path."""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    """Install the CI user token for one test and delete it afterwards."""
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    """Session-wide HfApi client talking to the CI Hub."""
    return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token(hf_api):  # hf_api requested only for fixture ordering — TODO confirm
    """Install the CI user token for the session, restoring any pre-existing token afterwards."""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    """Return a callable that deletes a dataset repo from the CI Hub."""
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    """Context manager factory: yields the repo id, deletes the repo on exit."""
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)
    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    """Create a private dataset repo holding one text file; delete it after the session."""
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Per-test view of the session-scoped text-data repo, with CI endpoints patched."""
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    """Create a private dataset repo holding a zip of text data; delete it after the session."""
    # NOTE(review): third dependency assumed to be the zip-file path fixture — confirm its name.
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Per-test view of the session-scoped zipped-text repo, with CI endpoints patched."""
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    """Create a private dataset repo holding a zip of images; delete it after the session."""
    # NOTE(review): third dependency assumed to be the image-zip path fixture — confirm its name.
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    """Per-test view of the session-scoped zipped-image repo, with CI endpoints patched."""
    return hf_private_dataset_repo_zipped_img_data_
| 13
| 0
|
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers in ``grid``
    (up/down, left/right, or diagonally) — Project Euler problem 11.

    Name restored from the ``largest_product(...)`` call site in ``solution``.
    Bug fixes: the column loop iterated ``range(grid)`` (TypeError) instead of
    ``range(n_columns)``, and ``max`` was fed the grid four times instead of
    the four directional products.
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Directions reset each iteration so a stale diagonal never leaks in.
            lr_diag_product = 0
            rl_diag_product = 0
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    """Read the 20x20 grid from ``grid.txt`` next to this file and return the
    largest product of four adjacent numbers (Project Euler problem 11).

    Name restored from the ``print(solution())`` call site below. Bug fix:
    the path used ``os.path.dirname`` of an undefined name; the original
    anchors the data file relative to ``__file__``.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 429
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """Tool answering a natural-language question about a document image using a
    Donut (VisionEncoderDecoder) checkpoint.

    Base class restored to the ``PipelineTool`` imported above (the obfuscated base
    ``_a`` was undefined); attribute and method names follow the PipelineTool
    convention (encode/forward/decode) — TODO confirm against ``tools.base``.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build Donut decoder prompt ids and pixel values for the model."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Single-beam generation with Donut-specific stopping settings."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens from the generated sequence and extract the answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # Bug fix: the processor method is `token2json`, not the misspelled `tokenajson`.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 429
| 1
|
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A (UpperCamelCase_ , unittest.TestCase):
__lowercase: List[str] = CodeGenTokenizer
__lowercase: Union[str, Any] = CodeGenTokenizerFast
__lowercase: Tuple = True
__lowercase: Optional[int] = {"""add_prefix_space""": True}
__lowercase: int = False
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
snake_case_ = dict(zip(_a , range(len(_a ) ) ) )
snake_case_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case_ = {"""unk_token""": """<unk>"""}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = """lower newer"""
snake_case_ = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ = """lower newer"""
snake_case_ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case_ = tokenizer.tokenize(_a , add_prefix_space=_a )
self.assertListEqual(_a , _a )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer(add_prefix_space=_a )
snake_case_ = """lower newer"""
# Testing tokenization
snake_case_ = tokenizer.tokenize(_a , add_prefix_space=_a )
snake_case_ = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids without special tokens
snake_case_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
snake_case_ = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids with special tokens
snake_case_ = self.get_rust_tokenizer(add_prefix_space=_a )
snake_case_ = tokenizer.encode(_a , add_prefix_space=_a )
snake_case_ = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# Testing the unknown token
snake_case_ = tokens + [rust_tokenizer.unk_token]
snake_case_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowerCAmelCase ( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : int ) ->Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any]=15 ) ->Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
snake_case_ = """This is a simple input"""
snake_case_ = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case_ = ("""This is a simple input""", """This is a pair""")
snake_case_ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="""max_length""" )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="""max_length""" )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="""max_length""" , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="""max_length""" )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="""max_length""" )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="""max_length""" , )
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
snake_case_ = """This is a simple input"""
snake_case_ = ["""This is a simple input looooooooong""", """This is a simple input"""]
snake_case_ = ("""This is a simple input""", """This is a pair""")
snake_case_ = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
snake_case_ = tokenizer.pad_token_id
snake_case_ = tokenizer(_a , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
snake_case_ = tokenizer(_a , padding=_a , truncate=_a , return_tensors="""np""" )
snake_case_ = tokenizer(*_a , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
snake_case_ = tokenizer(_a , padding=_a , truncate=_a , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
snake_case_ = """$$$"""
snake_case_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a )
snake_case_ = """This is a simple input"""
snake_case_ = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case_ = tokenizer.bos_token_id
snake_case_ = tokenizer(_a )
snake_case_ = tokenizer(_a )
self.assertEqual(out_s.input_ids[0] , _a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case_ = tokenizer.decode(out_s.input_ids )
snake_case_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]:
        """Exercise CodeGen's ``truncate_before_pattern`` decode option.

        Decoding with the patterns below should cut generated text at the first
        comment marker, EOS token, docstring delimiter, or blank-line run.
        NOTE(review): assignment targets were collapsed to ``snake_case_`` and
        call arguments to ``_a`` by the renaming pass, so the test errors as
        written — kept byte-identical pending the upstream source.
        """
        snake_case_ = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
        snake_case_ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
        snake_case_ = """\nif len_a > len_b: result = a\nelse: result = b"""
        snake_case_ = tokenizer.encode(_a )
        # Patterns marking where generated code should be truncated.
        snake_case_ = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
        snake_case_ = tokenizer.decode(_a , truncate_before_pattern=_a )
        self.assertEqual(_a , _a )
    def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
        """Intentional no-op override, disabling an inherited common-tokenizer test.

        NOTE(review): the original method name was lost to the renaming pass, so
        which base-class test is being skipped cannot be determined from this file.
        """
        pass
| 711
|
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _a ( _SCREAMING_SNAKE_CASE = 8 ) -> str:
snake_case_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_SCREAMING_SNAKE_CASE )
snake_case_ = i // 3
snake_case_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
snake_case_ = (
chars_incl
+ random(_SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
+ random(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
)
snake_case_ = list(_SCREAMING_SNAKE_CASE )
shuffle(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(_SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass # Put your code here...
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
snake_case_ = any(char in ascii_uppercase for char in password )
snake_case_ = any(char in ascii_lowercase for char in password )
snake_case_ = any(char in digits for char in password )
snake_case_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _a ( ) -> str:
    """Interactive driver: read a desired length and required characters, then print passwords.

    NOTE(review): the renaming pass collapsed both local variables to
    ``snake_case_`` and left ``_SCREAMING_SNAKE_CASE`` (the arguments) and
    ``main`` (the guard's target) unresolved, so this block raises NameError as
    written — kept byte-identical pending the upstream source.
    """
    snake_case_ = int(input("""Please indicate the max length of your password: """ ).strip() )
    snake_case_ = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(_SCREAMING_SNAKE_CASE ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , )
    print("""[If you are thinking of using this passsword, You better save it.]""" )


if __name__ == "__main__":
    main()
| 2
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase__:
    """Test helper that builds a tiny MegatronBert config and random input tensors.

    NOTE(review): the automated renaming pass declared every ``__init__`` /
    checker-method parameter with the same name (a Python SyntaxError) and
    collapsed all assignment targets to ``lowercase_``; the intended local names
    survive only where they are read (e.g. ``model``, ``result``,
    ``self.batch_size``).  Kept byte-identical pending the upstream source.
    """

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_3 , SCREAMING_SNAKE_CASE_ : int=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=9_9 , SCREAMING_SNAKE_CASE_ : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Dict=3_7 , SCREAMING_SNAKE_CASE_ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_2 , SCREAMING_SNAKE_CASE_ : Any=1_6 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : int=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : str=None , ) -> List[Any]:
        # Store the hyper-parameters of the tiny model under test.
        lowercase_ = parent
        lowercase_ = batch_size
        lowercase_ = seq_length
        lowercase_ = is_training
        lowercase_ = use_input_mask
        lowercase_ = use_token_type_ids
        lowercase_ = use_labels
        lowercase_ = vocab_size
        lowercase_ = hidden_size
        lowercase_ = embedding_size
        lowercase_ = num_hidden_layers
        lowercase_ = num_attention_heads
        lowercase_ = intermediate_size
        lowercase_ = hidden_act
        lowercase_ = hidden_dropout_prob
        lowercase_ = attention_probs_dropout_prob
        lowercase_ = max_position_embeddings
        lowercase_ = type_vocab_size
        lowercase_ = type_sequence_label_size
        lowercase_ = initializer_range
        lowercase_ = num_labels
        lowercase_ = num_choices
        lowercase_ = scope

    def _lowercase ( self : Optional[int] ) -> Optional[int]:
        # Draw random input ids, optional masks/token types, optional labels, and a config.
        lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase_ = None
        if self.use_input_mask:
            lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase_ = None
        if self.use_token_type_ids:
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase_ = None
        lowercase_ = None
        lowercase_ = None
        if self.use_labels:
            lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
        lowercase_ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> List[Any]:
        # Build the small MegatronBertConfig from the stored hyper-parameters.
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )

    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str ) -> str:
        # Builds MegatronBertModel and checks last_hidden_state / pooler_output shapes.
        lowercase_ = MegatronBertModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
        lowercase_ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
        lowercase_ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple:
        # Builds MegatronBertForMaskedLM and checks the LM-head logits shape.
        lowercase_ = MegatronBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
        # Builds MegatronBertForCausalLM and checks the LM-head logits shape.
        lowercase_ = MegatronBertForCausalLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]:
        # Builds MegatronBertForNextSentencePrediction and checks the 2-way logits shape.
        lowercase_ = MegatronBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int:
        # Builds MegatronBertForPreTraining and checks prediction / seq-relationship logits shapes.
        lowercase_ = MegatronBertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
        # Builds MegatronBertForQuestionAnswering and checks start/end logits shapes.
        lowercase_ = MegatronBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
        # Builds MegatronBertForSequenceClassification and checks the per-label logits shape.
        lowercase_ = self.num_labels
        lowercase_ = MegatronBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]:
        # Builds MegatronBertForTokenClassification and checks the per-token logits shape.
        lowercase_ = self.num_labels
        lowercase_ = MegatronBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> int:
        # Builds MegatronBertForMultipleChoice: inputs are tiled per choice before the forward pass.
        lowercase_ = self.num_choices
        lowercase_ = MegatronBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowercase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowercase_ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : Any ) -> Dict:
        # prepare_config_and_inputs_for_common: unpack the prepared tuple and build the inputs dict.
        # NOTE(review): the 7-way unpacking below assigns every element to the
        # same ``lowercase_`` name, so ``input_ids`` etc. are unbound when read.
        lowercase_ = self.prepare_config_and_inputs()
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) = config_and_inputs
        lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """MegatronBert model test-suite driving the tester helper above via common mixins.

    NOTE(review): the mixin base classes were renamed to the undefined
    ``UpperCAmelCase`` and every class attribute to ``a`` (each rebinding the
    previous one), so the suite cannot run as written — kept byte-identical
    pending the upstream source.
    """

    # All model classes under test (only when torch is available).
    a :str = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    a :Tuple = (
        {
            'feature-extraction': MegatronBertModel,
            'fill-mask': MegatronBertForMaskedLM,
            'question-answering': MegatronBertForQuestionAnswering,
            'text-classification': MegatronBertForSequenceClassification,
            'text-generation': MegatronBertForCausalLM,
            'token-classification': MegatronBertForTokenClassification,
            'zero-shot': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a :List[str] = True
    # test_resize_embeddings = False
    a :str = False

    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=False ) -> int:
        # Adds dummy (zero) label tensors for models in the pretraining mapping.
        lowercase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
        if return_labels:
            if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
                lowercase_ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
                lowercase_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
        return inputs_dict

    def _lowercase ( self : Tuple ) -> List[Any]:
        # setUp: create the model tester and the common config tester.
        lowercase_ = MegatronBertModelTester(self )
        lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )

    def _lowercase ( self : int ) -> Optional[int]:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : int ) -> Any:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : int ) -> int:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : Optional[int] ) -> str:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : Dict ) -> Optional[int]:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : Tuple ) -> Optional[Any]:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : List[Any] ) -> Optional[int]:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def _lowercase ( self : Tuple ) -> List[Any]:
        lowercase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def a ( snake_case__: Any ):
    """Wrap ``snake_case__`` (a nested list of token ids) in a ``torch.long`` tensor on the test device.

    NOTE(review): the obfuscated original passed the token list itself as the
    ``device`` argument; restored here to the ``torch_device`` imported at the
    top of this file.  The ``Any`` annotation is inherited from the original and
    is never imported in this fragment.
    """
    return torch.tensor(
        snake_case__ , dtype=torch.long , device=torch_device , )
__a = 1E-4  # NOTE(review): presumably the isclose tolerance for the integration test — the use site was renamed by obfuscation; confirm upstream
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__( unittest.TestCase ):
    """Slow integration check of MegatronBert-345m hidden states against reference values."""

    @slow
    @unittest.skip('''Model is not available.''' )
    def _lowercase ( self : Union[str, Any] ) -> Dict:
        # Load the fp16 checkpoint (optionally rooted at $MYDIR) and compare a
        # 3x3 slice of the first hidden states against hard-coded references.
        # NOTE(review): assignment targets were collapsed to ``lowercase_`` and
        # arguments to ``SCREAMING_SNAKE_CASE_``; ``model``, ``output``,
        # ``expected`` etc. are read but never bound, so this test errors as
        # written — kept byte-identical pending the upstream source.
        lowercase_ = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            lowercase_ = os.path.join(os.environ['''MYDIR'''] , SCREAMING_SNAKE_CASE_ )
        lowercase_ = MegatronBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.half()
        lowercase_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            lowercase_ = model(SCREAMING_SNAKE_CASE_ )[0]
        lowercase_ = torch.Size((1, 9, 1_0_2_4) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
        lowercase_ = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
        for ii in range(3 ):
            for jj in range(3 ):
                lowercase_ = output[0, ii, jj]
                lowercase_ = expected[3 * ii + jj]
                lowercase_ = '''ii={} jj={} a={} b={}'''.format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                self.assertTrue(math.isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rel_tol=SCREAMING_SNAKE_CASE_ , abs_tol=SCREAMING_SNAKE_CASE_ ) , msg=SCREAMING_SNAKE_CASE_ )
| 97
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()  # from diffusers.utils.testing_utils: forces deterministic execution for these tests
class _lowerCAmelCase ( unittest.TestCase ):
    '''Fast PNDM pipeline tests on a tiny, seeded random UNet.

    NOTE(review): assignment targets were collapsed to ``_snake_case`` and
    arguments to the undefined ``UpperCAmelCase`` by the renaming pass
    (``model``, ``pndm``, ``image`` etc. are read but never bound), so these
    tests error as written — kept byte-identical pending the upstream source.
    '''

    @property
    def lowercase (self ) -> Optional[Any]:
        # Build a small, deterministic (seeded) UNet2D for the fast test below.
        torch.manual_seed(0 )
        _snake_case = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    def lowercase (self ) -> Dict:
        # Run the pipeline twice (dict and tuple return) with the same seed and
        # compare a corner slice of the output image against reference values.
        _snake_case = self.dummy_uncond_unet
        _snake_case = PNDMScheduler()
        _snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
        pndm.to(UpperCAmelCase )
        pndm.set_progress_bar_config(disable=UpperCAmelCase )
        _snake_case = torch.manual_seed(0 )
        _snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" ).images
        _snake_case = torch.manual_seed(0 )
        _snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCAmelCase )[0]
        _snake_case = image[0, -3:, -3:, -1]
        _snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _snake_case = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    '''Slow PNDM test on the pretrained google/ddpm-cifar10-32 UNet.

    NOTE(review): same obfuscation damage as the fast test above — collapsed
    assignment targets and undefined ``UpperCAmelCase`` arguments; kept
    byte-identical pending the upstream source.
    '''

    def lowercase (self ) -> Optional[Any]:
        # Generate one image with the pretrained UNet and compare a corner
        # slice against hard-coded reference values.
        _snake_case = """google/ddpm-cifar10-32"""
        _snake_case = UNetaDModel.from_pretrained(UpperCAmelCase )
        _snake_case = PNDMScheduler()
        _snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
        pndm.to(UpperCAmelCase )
        pndm.set_progress_bar_config(disable=UpperCAmelCase )
        _snake_case = torch.manual_seed(0 )
        _snake_case = pndm(generator=UpperCAmelCase , output_type="""numpy""" ).images
        _snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _snake_case = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 585
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
A_ : Tuple =random.Random()  # module-level RNG; NOTE(review): presumably the ``global_rng`` read by the float generator below — the renaming broke the link
def SCREAMING_SNAKE_CASE_ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in ``[0, scale)``.

    NOTE(review): the obfuscated original declared all four parameters with the
    same name (a Python SyntaxError) and dropped the rebinding of the fallback
    RNG; parameter names are restored from the references in the body.  ``name``
    is accepted but unused, matching the original signature.
    """
    if rng is None:
        rng = global_rng  # module-level random.Random created at import time
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class __a ( unittest.TestCase ):
    """Holds the hyper-parameters and input builders for the SpeechT5 feature-extractor tests.

    NOTE(review): every ``__init__`` parameter carries the same obfuscated name
    ``a__`` (a Python SyntaxError) and all assignment targets were collapsed to
    ``_lowerCamelCase``; the intended names survive only where they are read
    (e.g. ``self.batch_size``).  Kept byte-identical pending the upstream source.
    """

    def __init__( self , a__ , a__=7 , a__=4_00 , a__=20_00 , a__=1 , a__=0.0 , a__=1_60_00 , a__=True , a__=80 , a__=16 , a__=64 , a__="hann_window" , a__=80 , a__=76_00 , a__=1e-10 , a__=True , ):
        # Store the feature-extraction hyper-parameters used by the tests.
        _lowerCamelCase = parent
        _lowerCamelCase = batch_size
        _lowerCamelCase = min_seq_length
        _lowerCamelCase = max_seq_length
        _lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        _lowerCamelCase = feature_size
        _lowerCamelCase = padding_value
        _lowerCamelCase = sampling_rate
        _lowerCamelCase = do_normalize
        _lowerCamelCase = num_mel_bins
        _lowerCamelCase = hop_length
        _lowerCamelCase = win_length
        _lowerCamelCase = win_function
        _lowerCamelCase = fmin
        _lowerCamelCase = fmax
        _lowerCamelCase = mel_floor
        _lowerCamelCase = return_attention_mask

    def snake_case_ ( self ):
        # Build the kwargs dict used to instantiate the feature extractor.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def snake_case_ ( self , a__=False , a__=False ):
        # Build raw-waveform inputs; lengths increase across the batch unless equal_length.
        def _flatten(a__ ):
            return list(itertools.chain(*a__ ) )

        if equal_length:
            _lowerCamelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            _lowerCamelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            _lowerCamelCase = [np.asarray(a__ ) for x in speech_inputs]
        return speech_inputs

    def snake_case_ ( self , a__=False , a__=False ):
        # Build mel-spectrogram target inputs with the same length policy as above.
        if equal_length:
            _lowerCamelCase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            _lowerCamelCase = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            _lowerCamelCase = [np.asarray(a__ ) for x in speech_inputs]
        return speech_inputs
@require_torch
class __a ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Dict = SpeechTaFeatureExtractor
    def snake_case_ ( self ):
        # setUp: attach the shared hyper-parameter / input-builder helper.
        # NOTE(review): throughout these methods, assignment targets were
        # collapsed to ``_lowerCamelCase`` and arguments to ``a__`` by the
        # renaming pass; kept byte-identical pending the upstream source.
        _lowerCamelCase = SpeechTaFeatureExtractionTester(self )

    def snake_case_ ( self , a__ ):
        # Assert features are (approximately) zero-mean and unit-variance per dimension.
        self.assertTrue(np.all(np.mean(a__ , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(a__ , axis=0 ) - 1 ) < 1e-3 ) )

    def snake_case_ ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _lowerCamelCase = [np.asarray(a__ ) for speech_input in speech_inputs]
        # Test not batched input
        _lowerCamelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
        _lowerCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
        # Test batched
        _lowerCamelCase = feat_extract(a__ , return_tensors='np' ).input_values
        _lowerCamelCase = feat_extract(a__ , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
            self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )

    def snake_case_ ( self ):
        # Padding with an explicit max_length: padded tails must sum to (near) zero.
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _lowerCamelCase = ['longest', 'max_length', 'do_not_pad']
        _lowerCamelCase = [None, 16_00, None]
        for max_length, padding in zip(a__ , a__ ):
            _lowerCamelCase = feat_extract(a__ , padding=a__ , max_length=a__ , return_tensors='np' )
            _lowerCamelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_00] )
            self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:10_00] )
            self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:12_00] )
    def snake_case_ ( self ):
        # Same padding strategies without return_tensors: normalization still holds per length.
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase = range(8_00 , 14_00 , 2_00 )
        _lowerCamelCase = [floats_list((1, x) )[0] for x in lengths]
        _lowerCamelCase = ['longest', 'max_length', 'do_not_pad']
        _lowerCamelCase = [None, 16_00, None]
        for max_length, padding in zip(a__ , a__ ):
            _lowerCamelCase = feat_extract(a__ , max_length=a__ , padding=a__ )
            _lowerCamelCase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_00] )
            self._check_zero_mean_unit_variance(input_values[1][:10_00] )
            self._check_zero_mean_unit_variance(input_values[2][:12_00] )

    def snake_case_ ( self ):
        # Truncation to max_length with 'max_length' padding: all rows end up length 1000.
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _lowerCamelCase = feat_extract(
            a__ , truncation=a__ , max_length=10_00 , padding='max_length' , return_tensors='np' )
        _lowerCamelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_00] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def snake_case_ ( self ):
        # 'longest' padding: output width is min(max_length, longest input).
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _lowerCamelCase = feat_extract(
            a__ , truncation=a__ , max_length=10_00 , padding='longest' , return_tensors='np' )
        _lowerCamelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_00] )
        self._check_zero_mean_unit_variance(input_values[1, :10_00] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 10_00) )
        _lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _lowerCamelCase = feat_extract(
            a__ , truncation=a__ , max_length=20_00 , padding='longest' , return_tensors='np' )
        _lowerCamelCase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_00] )
        self._check_zero_mean_unit_variance(input_values[1, :10_00] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 12_00) )
    def snake_case_ ( self ):
        # Padding must preserve the float32 dtype for both numpy and torch tensors.
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase = np.random.rand(1_00 ).astype(np.floataa )
        _lowerCamelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            _lowerCamelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            _lowerCamelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def snake_case_ ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        # (target/mel-spectrogram path of the feature extractor).
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        _lowerCamelCase = [np.asarray(a__ ) for speech_input in speech_inputs]
        # Test feature size
        _lowerCamelCase = feature_extractor(audio_target=a__ , padding=a__ , return_tensors='np' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        _lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
        _lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
        # Test batched
        _lowerCamelCase = feature_extractor(a__ , return_tensors='np' ).input_values
        _lowerCamelCase = feature_extractor(a__ , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
            self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        _lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        _lowerCamelCase = np.asarray(a__ )
        _lowerCamelCase = feature_extractor(a__ , return_tensors='np' ).input_values
        _lowerCamelCase = feature_extractor(a__ , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
            self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )

    def snake_case_ ( self ):
        # BatchFeature should preserve target inputs as-is, and tensorize equal-length ones.
        _lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase = feat_extract.model_input_names[0]
        _lowerCamelCase = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(a__ ) == len(a__ ) for x, y in zip(a__ , processed_features[input_name] ) ) )
        _lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ )
        _lowerCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
        _lowerCamelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            _lowerCamelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def snake_case_ ( self ):
        # Same BatchFeature shape check as above, but tensorized to torch.
        _lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ )
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase = feat_extract.model_input_names[0]
        _lowerCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
        _lowerCamelCase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            _lowerCamelCase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def snake_case_ ( self ):
        # Padding must agree (up to float error) between numpy and torch tensor types.
        _lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
        _lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase = feat_extract.model_input_names[0]
        _lowerCamelCase = BatchFeature({input_name: speech_inputs} )
        _lowerCamelCase = feat_extract.num_mel_bins # hack!
        _lowerCamelCase = feat_extract.pad(a__ , padding='longest' , return_tensors='np' )[input_name]
        _lowerCamelCase = feat_extract.pad(a__ , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )

    def snake_case_ ( self ):
        # With return_attention_mask enabled, the mask must match input shape and true lengths.
        _lowerCamelCase = self.feat_extract_dict
        _lowerCamelCase = True
        _lowerCamelCase = self.feature_extraction_class(**a__ )
        _lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
        _lowerCamelCase = [len(a__ ) for x in speech_inputs]
        _lowerCamelCase = feat_extract.model_input_names[0]
        _lowerCamelCase = BatchFeature({input_name: speech_inputs} )
        _lowerCamelCase = feat_extract.num_mel_bins # hack!
        _lowerCamelCase = feat_extract.pad(a__ , padding='longest' , return_tensors='np' )
        self.assertIn('attention_mask' , a__ )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.feat_extract_dict
_lowerCamelCase = True
_lowerCamelCase = self.feature_extraction_class(**a__ )
_lowerCamelCase = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCamelCase = [len(a__ ) for x in speech_inputs]
_lowerCamelCase = feat_extract.model_input_names[0]
_lowerCamelCase = BatchFeature({input_name: speech_inputs} )
_lowerCamelCase = min(a__ )
_lowerCamelCase = feat_extract.num_mel_bins # hack!
_lowerCamelCase = feat_extract.pad(
a__ , padding='max_length' , max_length=a__ , truncation=a__ , return_tensors='np' )
self.assertIn('attention_mask' , a__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def snake_case_ ( self , a__ ):
from datasets import load_dataset
_lowerCamelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_lowerCamelCase = ds.sort('id' ).select(range(a__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case_ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[2.3_804e-03, 2.0_752e-03, 1.9_836e-03, 2.1_057e-03, 1.6_174e-03,
3.0_518e-04, 9.1_553e-05, 3.3_569e-04, 9.7_656e-04, 1.8_311e-03,
2.0_142e-03, 2.1_057e-03, 1.7_395e-03, 4.5_776e-04, -3.9_673e-04,
4.5_776e-04, 1.0_071e-03, 9.1_553e-05, 4.8_828e-04, 1.1_597e-03,
7.3_242e-04, 9.4_604e-04, 1.8_005e-03, 1.8_311e-03, 8.8_501e-04,
4.2_725e-04, 4.8_828e-04, 7.3_242e-04, 1.0_986e-03, 2.1_057e-03] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = SpeechTaFeatureExtractor()
_lowerCamelCase = feature_extractor(a__ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , a__ , atol=1e-6 ) )
def snake_case_ ( self ):
# fmt: off
_lowerCamelCase = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
_lowerCamelCase = self._load_datasamples(1 )
_lowerCamelCase = SpeechTaFeatureExtractor()
_lowerCamelCase = feature_extractor(audio_target=a__ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a__ , atol=1e-4 ) )
| 222
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds a tiny DeBERTa-v2 config plus random inputs and exercises every head.

    NOTE(review): the scrambled original gave every parameter and method the same
    name (a SyntaxError); names are restored from the visible call sites
    (``DebertaVaModelTester(self)`` and ``create_and_check_deberta_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids, masks and labels for all heads."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        # A scalar loss has an empty size.
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last output is checked.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each sample once per choice: (B, L) -> (B, num_choices, L).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the tiny DeBERTa-v2 models.

    NOTE(review): the scrambled original collapsed all class attributes and test
    methods into single repeated names; restored to the conventional layout so
    unittest discovery and the mixins work.
    """

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """Slow integration checks against the released microsoft/deberta-v2-xlarge weights."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 222
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
    """Output container for a Stable Diffusion pipeline (images + safety flags).

    NOTE(review): both fields share one (scrambled) name, so the second
    annotation overwrites the first — the original presumably had distinct
    names such as ``images`` and ``nsfw_content_detected``; confirm upstream.
    """
    # Generated images, as a list of PIL images or a numpy array.
    SCREAMING_SNAKE_CASE_: Union[List[PIL.Image.Image], np.ndarray]
    # Optional per-image NSFW flags — presumably from the safety checker.
    SCREAMING_SNAKE_CASE_: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
        """Flax counterpart of the pipeline output (images array + NSFW flags).

        NOTE(review): both fields share one (scrambled) name — the second
        annotation overwrites the first; confirm original field names upstream.
        """
        # Generated images as a numpy array.
        SCREAMING_SNAKE_CASE_: np.ndarray
        # Per-image NSFW flags.
        SCREAMING_SNAKE_CASE_: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 580
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Return a ``ksize`` x ``ksize`` Gabor filter kernel.

    :param ksize: kernel side length; an even value is bumped to the next odd.
    :param sigma: standard deviation of the Gaussian envelope.
    :param theta: orientation of the filter, in degrees.
    :param lambd: wavelength of the sinusoidal carrier.
    :param gamma: spatial aspect ratio.
    :param psi: phase offset, in radians.

    NOTE(review): the scrambled original had duplicate parameter names and never
    stored the computed value into the kernel; both restored here.
    """
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # Orientation is constant over the kernel, so compute the trig terms once.
    _theta = theta / 180 * np.pi
    cos_theta = np.cos(_theta)
    sin_theta = np.sin(_theta)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # rotate the coordinates into the filter's frame
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: Gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges, accumulating six orientations.
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel)
    # Normalise the accumulated response to the displayable 0-255 uint8 range.
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 580
| 1
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
# This module is a backward-compatibility shim: it only re-exports
# StableDiffusionInpaintPipeline from `diffusers` and warns callers to
# import it from there directly.
warnings.warn(
    "The `inpainting.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionInpaintPipeline` instead."
)
| 704
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules.

    Restored from the scrambled original: the members were all assigned to one
    mangled name, while `TYPE_TO_SCHEDULER_FUNCTION` and `get_scheduler` below
    reference `SchedulerType.LINEAR` etc.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: the wrapped optimizer.
        last_epoch: index of the last epoch when resuming training.
    """
    # Multiplier is always 1, i.e. the optimizer's base lr is used unchanged.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Ramp linearly from 0 to the base lr during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant lr multipliers parsed from a rule string.

    `step_rules` looks like "1:10,0.1:20,0.01" meaning: multiplier 1 before
    step 10, 0.1 before step 20, then 0.01 for the rest of training.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # Every entry except the last is "multiplier:step"; the last is the
    # multiplier used after the final boundary.
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base lr, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly; clamp at 0 once training steps are exhausted.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup followed by a cosine decay to 0 over the remaining steps."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        # num_cycles=0.5 (default) traces half a cosine wave: 1 -> 0.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    """Linear warmup, then `num_cycles` cosine decays each restarting at the base lr."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine wave at each cycle boundary.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the base lr down to `lr_end`.

    Raises:
        ValueError: if `lr_end` is not strictly smaller than the optimizer's lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table used by `get_scheduler` below; the original assignment
# clobbered the module logger's name, so it is restored to the name that
# `get_scheduler` actually reads.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Unified factory: build any supported scheduler from its `SchedulerType` name.

    Args:
        name: a `SchedulerType` or its string value.
        optimizer: the wrapped optimizer.
        step_rules: rule string, only for PIECEWISE_CONSTANT.
        num_warmup_steps: required by every warmup schedule.
        num_training_steps: required by the decaying schedules.
        num_cycles: only for COSINE_WITH_RESTARTS.
        power: only for POLYNOMIAL.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 13
| 0
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
# NOTE(review): the original assigned all four module constants to one name,
# so the class below referenced undefined names; restored to the names the
# class attributes actually read.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class a ( PreTrainedTokenizerFast ):
    """Fast CodeGen tokenizer, backed by HuggingFace *tokenizers* (byte-level BPE).

    Adds a `truncate_before_pattern` option to `decode` so generated code can
    be cut at natural stopping points (e.g. a second top-level ``def``).

    NOTE(review): the scrambled original had duplicate parameter names and
    identical method names (later defs shadowing earlier ones); restored to
    the conventional fast-tokenizer layout matching the imports above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        # Rebuild the pre-tokenizer if its stored `add_prefix_space` disagrees
        # with the value requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ):
        """Decode ids to text, optionally truncating at the first regex match."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut `completion` before a second print/def or any terminal pattern."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
|
"""simple docstring"""
class lowerCamelCase :
    """Prefix-sum array supporting O(1) inclusive range-sum queries and an
    O(n) check for the existence of a contiguous sub-array with a given sum.

    Fixes: all assignment targets had been collapsed to
    ``SCREAMING_SNAKE_CASE__`` (so ``self.prefix_sum`` was never set) and the
    range-sum method declared two parameters with the same name
    (SyntaxError).
    """

    def __init__( self , array ) -> None:
        len_array = len(array )
        # prefix_sum[i] == sum(array[0..i]) inclusive.
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def lowerCAmelCase_ ( self , start , end ) -> int:
        """Return sum(array[start..end]) inclusive.

        NOTE(review): this definition is shadowed by the next method of the
        same (obfuscated) name, matching the original file's layout.
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def lowerCAmelCase_ ( self , target_sum ) -> bool:
        """Return True iff some contiguous sub-array sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            # A sub-array ending here sums to target_sum iff an earlier
            # prefix equals sum_item - target_sum.
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 159
| 0
|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE ( a, left_index, right_index):
    """Lomuto-style partition of a[left_index:right_index] around the pivot
    a[left_index], in place. Returns the pivot's final index.

    Fixes: the obfuscated body referenced the undefined names ``pivot`` and
    ``i`` (assignment targets had been collapsed to ``__snake_case``).
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            # Move the small element into the "less than pivot" prefix.
            a[j], a[i] = a[i], a[j]
            i += 1
    # Put the pivot between the smaller and larger partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def SCREAMING_SNAKE_CASE ( a, left, right):
    """Randomized quicksort of a[left:right], in place.

    Fixes: the obfuscated body referenced the undefined names ``pivot``,
    ``pivot_index``, ``partition`` and ``quick_sort_random``. Partitioning is
    provided as a local helper and the recursion calls this function's own
    (obfuscated) name so the block is self-contained.
    """

    def _partition(arr, left_index, right_index):
        # Lomuto partition around arr[left_index]; returns pivot's final index.
        pivot_value = arr[left_index]
        i = left_index + 1
        for j in range(left_index + 1, right_index):
            if arr[j] < pivot_value:
                arr[j], arr[i] = arr[i], arr[j]
                i += 1
        arr[left_index], arr[i - 1] = arr[i - 1], arr[left_index]
        return i - 1

    if left < right:
        pivot = random.randint(left, right - 1)
        # Switch the randomly chosen pivot with the left-most bound.
        a[left], a[pivot] = a[pivot], a[left]
        pivot_index = _partition(a, left, right)
        SCREAMING_SNAKE_CASE(a, left, pivot_index)  # recurse left of the pivot
        SCREAMING_SNAKE_CASE(a, pivot_index + 1, right)  # recurse right of the pivot
def SCREAMING_SNAKE_CASE ( ):
    # Interactive driver: read comma-separated integers, sort, and print.
    # NOTE(review): assignment targets were collapsed to `__snake_case`, so
    # `user_input`, `snake_case` and `quick_sort_random` are undefined here —
    # this function raises NameError as written; the original variable and
    # function names must be restored.
    __snake_case = input('''Enter numbers separated by a comma:\n''').strip()
    __snake_case = [int(snake_case) for item in user_input.split(''',''')]
    quick_sort_random(snake_case, 0, len(snake_case))
    print(snake_case)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module (the driver above is
    # also named SCREAMING_SNAKE_CASE) — confirm the intended entry point.
    main()
| 93
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _A ( unittest.TestCase ):
    """Unit tests for ``InstructBlipProcessor``: save/load round-trips and
    joint text/image preprocessing with tokenizer + qformer tokenizer.

    NOTE(review): throughout this class every local assignment target was
    collapsed to ``__snake_case`` by an automated rename, while later lines
    still reference the intended names (``processor``, ``tokenizer``,
    ``image_processor``, ``inputs`` …) and ``self.tmpdirname`` is never
    actually assigned. As written these tests raise NameError/AttributeError;
    the original local names must be restored before the class can run.
    """
    def lowercase ( self : str ) -> str:
        # Intended as setUp: build and persist a processor in a temp dir.
        __snake_case = tempfile.mkdtemp()
        __snake_case = BlipImageProcessor()
        __snake_case = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        __snake_case = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        __snake_case = InstructBlipProcessor(A_ , A_ , A_ )
        processor.save_pretrained(self.tmpdirname )
    def lowercase ( self : Tuple , **A_ : str ) -> List[Any]:
        # Helper: reload the saved processor and return its tokenizer.
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).tokenizer
    def lowercase ( self : Union[str, Any] , **A_ : Tuple ) -> Union[str, Any]:
        # Helper: reload the saved processor and return its image processor.
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).image_processor
    def lowercase ( self : Union[str, Any] , **A_ : Tuple ) -> Dict:
        # Helper: reload the saved processor and return its qformer tokenizer.
        return AutoProcessor.from_pretrained(self.tmpdirname , **A_ ).qformer_tokenizer
    def lowercase ( self : Optional[int] ) -> Optional[Any]:
        # Intended as tearDown: remove the temp dir created above.
        shutil.rmtree(self.tmpdirname )
    def lowercase ( self : Optional[int] ) -> Tuple:
        # Build one random RGB PIL image as pipeline input.
        __snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowercase ( self : int ) -> Dict:
        # Save/load round-trip with overridden tokenizer/image-processor kwargs.
        __snake_case = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        __snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        __snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
        __snake_case = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A_ )
        self.assertIsInstance(processor.qformer_tokenizer , A_ )
    def lowercase ( self : int ) -> str:
        # Image path: processor(images=...) must match the raw image processor.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = self.prepare_image_inputs()
        __snake_case = image_processor(A_ , return_tensors='''np''' )
        __snake_case = processor(images=A_ , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def lowercase ( self : List[str] ) -> Optional[int]:
        # Text path: processor(text=...) must match both tokenizers' outputs.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = '''lower newer'''
        __snake_case = processor(text=A_ )
        __snake_case = tokenizer(A_ , return_token_type_ids=A_ )
        __snake_case = qformer_tokenizer(A_ , return_token_type_ids=A_ )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
    def lowercase ( self : List[str] ) -> int:
        # Joint text+image call: check emitted keys and empty-call failure.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = '''lower newer'''
        __snake_case = self.prepare_image_inputs()
        __snake_case = processor(text=A_ , images=A_ )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
        # test if it raises when no input is passed
        with pytest.raises(A_ ):
            processor()
    def lowercase ( self : str ) -> Union[str, Any]:
        # batch_decode must delegate to the main tokenizer's batch_decode.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __snake_case = processor.batch_decode(A_ )
        __snake_case = tokenizer.batch_decode(A_ )
        self.assertListEqual(A_ , A_ )
    def lowercase ( self : int ) -> List[str]:
        # model_input_names: verify the full key set of a joint call.
        __snake_case = self.get_image_processor()
        __snake_case = self.get_tokenizer()
        __snake_case = self.get_qformer_tokenizer()
        __snake_case = InstructBlipProcessor(
            tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
        __snake_case = '''lower newer'''
        __snake_case = self.prepare_image_inputs()
        __snake_case = processor(text=A_ , images=A_ )
        self.assertListEqual(
            list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 93
| 1
|
'''simple docstring'''
import numpy as np
class lowerCAmelCase__ :
    """A* search node: a grid position plus parent link and path costs.

    Fixes: the obfuscated ``__init__`` assigned all values to a local
    (``__SCREAMING_SNAKE_CASE``) instead of instance attributes, so
    ``__eq__`` and ``showcell`` raised AttributeError.
    """

    def __init__( self ) -> None:
        self.position = (0, 0)  # (x, y) grid coordinate
        self.parent = None      # predecessor node on the current best path
        self.g = 0              # cost from start
        self.h = 0              # heuristic (squared distance to goal)
        self.f = 0              # total cost g + h

    def __eq__( self , cell ) -> bool:
        # Nodes compare equal by position only (costs are ignored).
        return self.position == cell.position

    def UpperCAmelCase__ ( self ) -> None:
        """Print this node's position (debug helper)."""
        print(self.position )
class lowerCAmelCase__ :
    """Rectangular grid world for A* search: a numpy weight matrix plus
    8-connected neighbour expansion.

    Fixes: the obfuscated ``__init__`` assigned to locals instead of
    ``self.w`` / ``self.world_x_limit`` / ``self.world_y_limit``, and
    ``get_neigbours`` referenced undefined names (``current_x``,
    ``neighbours`` …).
    """

    def __init__( self , world_size=(5, 5) ) -> None:
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def UpperCAmelCase__ ( self ) -> None:
        """Print the grid (debug helper).

        NOTE(review): shadowed by the next method of the same obfuscated
        name, matching the original file's layout.
        """
        print(self.w )

    def UpperCAmelCase__ ( self , cell ):
        """Return the in-bounds 8-connected neighbours of ``cell`` as new
        node objects with ``parent`` set to ``cell``."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # NOTE(review): `Cell` is not defined under that name in this
                # obfuscated module (the node class was renamed) — restore the
                # original class name for this call to resolve.
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
_open.append(a__ )
while _open:
__SCREAMING_SNAKE_CASE = np.argmin([n.f for n in _open] )
__SCREAMING_SNAKE_CASE = _open[min_f]
_closed.append(_open.pop(a__ ) )
if current == goal:
break
for n in world.get_neigbours(a__ ):
for c in _closed:
if c == n:
continue
__SCREAMING_SNAKE_CASE = current.g + 1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = n.position
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = goal.position
__SCREAMING_SNAKE_CASE = (ya - ya) ** 2 + (xa - xa) ** 2
__SCREAMING_SNAKE_CASE = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(a__ )
__SCREAMING_SNAKE_CASE = []
while current.parent is not None:
path.append(current.position )
__SCREAMING_SNAKE_CASE = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
    # Demo driver: build a 5x5 grid world, run A* from (0, 0) to (4, 4)
    # and mark the resulting path in the grid.
    # NOTE(review): assignment targets were collapsed to `UpperCAmelCase`, so
    # `world`, `start`, `goal` and `s` are never bound, and `Gridworld`,
    # `Cell`, `astar` are not defined under those names in this obfuscated
    # module — this block raises NameError as written.
    UpperCAmelCase : Optional[int] = Gridworld()
    # Start position and goal
    UpperCAmelCase : Tuple = Cell()
    UpperCAmelCase : Union[str, Any] = (0, 0)
    UpperCAmelCase : List[Any] = Cell()
    UpperCAmelCase : Tuple = (4, 4)
    print(f"""path from {start.position} to {goal.position}""")
    UpperCAmelCase : Tuple = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        UpperCAmelCase : str = 1
    print(world.w)
| 627
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
    """Fast pipeline test: KarrasVePipeline with a tiny UNet; compares a
    3x3 output slice and the dict/tuple return paths.

    NOTE(review): local assignment targets were collapsed to
    ``__SCREAMING_SNAKE_CASE``, so later references to ``model``, ``pipe``,
    ``image``, ``image_slice`` … raise NameError as written; the original
    local names must be restored before this class can run.
    """
    @property
    def UpperCAmelCase__ ( self : int ) -> Optional[int]:
        """Build a small deterministic UNet2DModel fixture."""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
        """Run two short inferences and check shape + reference slice."""
        __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
        __SCREAMING_SNAKE_CASE = KarrasVeScheduler()
        __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type="""numpy""" ).images
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type="""numpy""" , return_dict=__SCREAMING_SNAKE_CASE )[0]
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: full KarrasVePipeline on
    google/ncsnpp-celebahq-256 with a reference output slice.

    NOTE(review): local assignment targets were collapsed to
    ``__SCREAMING_SNAKE_CASE``, so references to ``model``, ``pipe``,
    ``image`` … raise NameError as written; restore the original locals.
    """
    def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
        """Generate one 256x256 sample and compare against reference values."""
        __SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
        __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = KarrasVeScheduler()
        __SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
        pipe.to(__SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type="""numpy""" ).images
        __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        __SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 627
| 1
|
def A_ ( A__ = 1000 ) -> int:
    """Project Euler 9 (generalized): among Pythagorean triplets
    a < b < c with a + b + c == A__, return the maximum product a*b*c,
    or -1 when no such triplet exists.

    Fixes: the obfuscated body referenced the undefined names ``n``, ``b``,
    ``c``, ``candidate`` and ``product`` (assignment targets collapsed).
    """
    product = -1
    candidate = 0
    for a in range(1 , A__ // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (A__ * A__ - 2 * a * A__) // (2 * A__ - 2 * a)
        c = A__ - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module — the function
    # above was renamed to `A_` by obfuscation, so this raises NameError.
    print(F"""{solution() = }""")
| 392
|
def A_ ( A__ ) -> list[int]:
    """Sieve of Eratosthenes: return all primes <= ``A__`` in ascending
    order.

    Raises:
        ValueError: if ``A__`` is not a positive integer.

    Fixes: the obfuscated body referenced the undefined names ``num``,
    ``primes`` and ``p``, and the inner ``range`` stepped by ``A__``
    instead of ``p`` (which would leave composites marked prime).
    """
    if A__ <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (A__ + 1)
    p = 2
    while p * p <= A__:
        if primes[p]:
            # Strike out every multiple of p starting at p*p.
            for i in range(p * p , A__ + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , A__ + 1 ) if primes[prime]]
if __name__ == "__main__":
    # Run doctests, then sieve an interactively supplied bound.
    import doctest
    doctest.testmod()
    # NOTE(review): the target was collapsed to `lowercase`, so `user_num`
    # is never bound, and `prime_sieve_eratosthenes` was renamed to `A_` —
    # the final print raises NameError as written.
    lowercase : Union[str, Any] = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
| 392
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_snake_case = logging.get_logger(__name__)
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
    """Video-classification pipeline: samples frames from a local or HTTP
    video with decord, runs the model, and returns top-k labels.

    NOTE(review): several method signatures declare multiple parameters with
    the same name (``_lowercase``), which is a SyntaxError in Python, and
    local assignment targets were collapsed to ``lowercase__`` while bodies
    still reference the intended names (``preprocess_params``,
    ``videoreader``, ``probs`` …). The original parameter and local names
    must be restored before this class can run.
    """
    def __init__( self :Any , *_lowercase :List[str] , **_lowercase :Dict ):
        '''Initialize the pipeline; requires the `decord` backend.'''
        super().__init__(*_lowercase , **_lowercase )
        requires_backends(self , "decord" )
        self.check_model_type(_lowercase )
    def UpperCAmelCase ( self :str , _lowercase :Any=None , _lowercase :List[str]=None , _lowercase :List[Any]=None ):
        '''Split kwargs into preprocess (frames/sampling) and postprocess (top_k) params.'''
        lowercase__ = {}
        if frame_sampling_rate is not None:
            lowercase__ = frame_sampling_rate
        if num_frames is not None:
            lowercase__ = num_frames
        lowercase__ = {}
        if top_k is not None:
            lowercase__ = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self :Union[str, Any] , _lowercase :Union[str, List[str]] , **_lowercase :int ):
        '''Classify one video path/URL (or a list of them).'''
        return super().__call__(_lowercase , **_lowercase )
    def UpperCAmelCase ( self :List[str] , _lowercase :List[Any] , _lowercase :List[str]=None , _lowercase :List[Any]=1 ):
        '''Fetch the video if remote, sample evenly spaced frames, run the image processor.'''
        if num_frames is None:
            lowercase__ = self.model.config.num_frames
        if video.startswith("http://" ) or video.startswith("https://" ):
            lowercase__ = BytesIO(requests.get(_lowercase ).content )
        lowercase__ = VideoReader(_lowercase )
        videoreader.seek(0 )
        lowercase__ = 0
        lowercase__ = num_frames * frame_sampling_rate - 1
        lowercase__ = np.linspace(_lowercase , _lowercase , num=_lowercase , dtype=np.intaa )
        lowercase__ = videoreader.get_batch(_lowercase ).asnumpy()
        lowercase__ = list(_lowercase )
        lowercase__ = self.image_processor(_lowercase , return_tensors=self.framework )
        return model_inputs
    def UpperCAmelCase ( self :List[Any] , _lowercase :str ):
        '''Forward pass through the model.'''
        lowercase__ = self.model(**_lowercase )
        return model_outputs
    def UpperCAmelCase ( self :Tuple , _lowercase :Union[str, Any] , _lowercase :str=5 ):
        '''Softmax the logits and return the top-k {score, label} dicts (PyTorch only).'''
        if top_k > self.model.config.num_labels:
            lowercase__ = self.model.config.num_labels
        if self.framework == "pt":
            lowercase__ = model_outputs.logits.softmax(-1 )[0]
            lowercase__ , lowercase__ = probs.topk(_lowercase )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        lowercase__ = scores.tolist()
        lowercase__ = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_lowercase , _lowercase )]
| 655
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: checks TFCamembertModel's output shape and a
    3x3 slice of the last hidden state against reference values.

    Fixes: locals had been collapsed to ``lowercase__`` (so ``model``,
    ``output`` … were undefined) and the dtypes ``tf.intaa`` /
    ``tf.floataa`` do not exist (``tf.int32`` / ``tf.float32`` intended).
    """
    @slow
    def UpperCAmelCase ( self ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 655
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase_ ( lowercase , unittest.TestCase ):
    """Fast pipeline tests for ShapEPipeline: tiny prior/text-encoder/
    renderer fixtures, a reference-slice check, batch consistency and
    num_images_per_prompt handling.

    NOTE(review): local assignment targets throughout this class were
    collapsed to ``SCREAMING_SNAKE_CASE__``; property bodies that return
    ``model`` / ``tokenizer`` and test bodies referencing ``pipe``,
    ``image``, ``inputs`` … raise NameError as written. The original local
    names must be restored before these tests can run.
    """
    _lowerCAmelCase : Optional[Any] = ShapEPipeline
    _lowerCAmelCase : Tuple = ["prompt"]
    _lowerCAmelCase : List[Any] = ["prompt"]
    _lowerCAmelCase : List[Any] = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase : List[str] = False
    @property
    def lowerCAmelCase__ ( self ):
        # text embedder hidden size
        return 32
    @property
    def lowerCAmelCase__ ( self ):
        # time input dim
        return 32
    @property
    def lowerCAmelCase__ ( self ):
        # time embed dim
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__ ( self ):
        # renderer dim
        return 8
    @property
    def lowerCAmelCase__ ( self ):
        # tiny CLIP tokenizer fixture
        SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer
    @property
    def lowerCAmelCase__ ( self ):
        # tiny CLIP text encoder fixture (deterministic)
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(UpperCAmelCase__ )
    @property
    def lowerCAmelCase__ ( self ):
        # tiny PriorTransformer fixture (deterministic)
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        SCREAMING_SNAKE_CASE__ = PriorTransformer(**UpperCAmelCase__ )
        return model
    @property
    def lowerCAmelCase__ ( self ):
        # tiny ShapERenderer fixture (deterministic)
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        SCREAMING_SNAKE_CASE__ = ShapERenderer(**UpperCAmelCase__ )
        return model
    def lowerCAmelCase__ ( self ):
        # assemble the pipeline components dict
        SCREAMING_SNAKE_CASE__ = self.dummy_prior
        SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
        SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
        SCREAMING_SNAKE_CASE__ = self.dummy_renderer
        SCREAMING_SNAKE_CASE__ = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCAmelCase__ , clip_sample=UpperCAmelCase__ , clip_sample_range=1.0 , )
        SCREAMING_SNAKE_CASE__ = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__=0 ):
        # build deterministic call kwargs for the given device/seed
        if str(UpperCAmelCase__ ).startswith("mps" ):
            SCREAMING_SNAKE_CASE__ = torch.manual_seed(UpperCAmelCase__ )
        else:
            SCREAMING_SNAKE_CASE__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def lowerCAmelCase__ ( self ):
        # one-step CPU inference; compare a corner slice to reference values
        SCREAMING_SNAKE_CASE__ = "cpu"
        SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ = self.pipeline_class(**UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) )
        SCREAMING_SNAKE_CASE__ = output.images[0]
        SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        SCREAMING_SNAKE_CASE__ = np.array(
            [
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
                0.00_039_216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def lowerCAmelCase__ ( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def lowerCAmelCase__ ( self ):
        # batch-of-one vs batched outputs must match (relaxed on CPU)
        SCREAMING_SNAKE_CASE__ = torch_device == "cpu"
        SCREAMING_SNAKE_CASE__ = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=UpperCAmelCase__ , relax_max_difference=UpperCAmelCase__ , )
    def lowerCAmelCase__ ( self ):
        # num_images_per_prompt scales the output batch dimension
        SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ = self.pipeline_class(**UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = 1
        SCREAMING_SNAKE_CASE__ = 2
        SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase__ )
        for key in inputs.keys():
            if key in self.batch_params:
                SCREAMING_SNAKE_CASE__ = batch_size * [inputs[key]]
        SCREAMING_SNAKE_CASE__ = pipe(**UpperCAmelCase__ , num_images_per_prompt=UpperCAmelCase__ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration test: full openai/shap-e generation compared
    against a stored reference output.

    NOTE(review): local assignment targets were collapsed to
    ``SCREAMING_SNAKE_CASE__``; references to ``pipe`` and the final
    comparison arguments are undefined as written — restore the original
    locals (``expected_image``, ``pipe``, ``generator``, ``images``).
    """
    def lowerCAmelCase__ ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase__ ( self ):
        SCREAMING_SNAKE_CASE__ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy" )
        SCREAMING_SNAKE_CASE__ = ShapEPipeline.from_pretrained("openai/shap-e" )
        SCREAMING_SNAKE_CASE__ = pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = pipe(
            "a shark" , generator=UpperCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(UpperCAmelCase__ , UpperCAmelCase__ )
| 112
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCamelCase_ :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=3 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=99 , UpperCAmelCase__=32 , UpperCAmelCase__=5 , UpperCAmelCase__=4 , UpperCAmelCase__=37 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=16 , UpperCAmelCase__=2 , UpperCAmelCase__=0.02 , UpperCAmelCase__=3 , UpperCAmelCase__=4 , UpperCAmelCase__=None , ):
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ = FalconModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
SCREAMING_SNAKE_CASE__ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
# select random slice
SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 ) )
    def lowerCAmelCase__ ( self ):
        """Unpack prepare_config_and_inputs() and return (config, inputs_dict)
        for the common test mixin.

        NOTE(review): the 7-way tuple unpack below assigns every element to the
        same mangled placeholder, so the subsequent reads of ``config_and_inputs``,
        ``input_ids`` and ``input_mask`` are unbound — restore the original
        element names.
        """
        SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) ,
        ) = config_and_inputs
        SCREAMING_SNAKE_CASE__ = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase_ ( lowercase , lowercase , lowercase , unittest.TestCase ):
    """Common Falcon model / generation / pipeline test suite.

    NOTE(review): names in this class are mangled — the three mixin bases are all
    called ``lowercase`` and the class attributes all ``_lowerCAmelCase`` (they
    shadow each other; presumably all_model_classes / all_generative_model_classes /
    pipeline_model_mapping / test_headmasking / test_pruning — confirm upstream).
    Assignments inside methods all target ``SCREAMING_SNAKE_CASE__``, so later
    reads (``model``, ``result``, ``input_dict`` ...) are unbound as written.
    """

    # Model classes exercised by the common tests.
    _lowerCAmelCase : Optional[Any] = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Generative subset used by generation tests.
    _lowerCAmelCase : str = (FalconForCausalLM,) if is_torch_available() else ()
    # Pipeline-task name -> model class mapping.
    _lowerCAmelCase : Optional[int] = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase : str = False
    _lowerCAmelCase : Dict = False

    def lowerCAmelCase__ ( self ):
        # setUp: build the model tester and a ConfigTester with a tiny hidden size.
        SCREAMING_SNAKE_CASE__ = FalconModelTester(self )
        SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )

    def lowerCAmelCase__ ( self ):
        # Shared config sanity checks.
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self ):
        # Plain forward-pass / shape test.
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__ )

    def lowerCAmelCase__ ( self ):
        # Exercise the model both with and without ALiBi position biases.
        SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            SCREAMING_SNAKE_CASE__ = alibi
            self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )

    def lowerCAmelCase__ ( self ):
        # Sequence-classification head: logits shape check (default problem type).
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ = 3
        SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
        SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase__ ( self ):
        # Sequence classification with problem_type="single_label_classification".
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ = 3
        SCREAMING_SNAKE_CASE__ = "single_label_classification"
        SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
        SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase__ ( self ):
        # Round-trip the KV cache through Falcon's legacy "RW" layout and back,
        # checking ranks (3-D RW vs 4-D standard) and exact value equality.
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
        SCREAMING_SNAKE_CASE__ = FalconForCausalLM(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = input_ids.shape[0]
        SCREAMING_SNAKE_CASE__ = model._convert_to_rw_cache(result.past_key_values )
        SCREAMING_SNAKE_CASE__ = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
        for layer in range(len(UpperCAmelCase__ ) ):
            for tensor_idx in range(2 ):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )

    def lowerCAmelCase__ ( self ):
        # Sequence classification with problem_type="multi_label_classification"
        # (float multi-hot labels).
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__ = 3
        SCREAMING_SNAKE_CASE__ = "multi_label_classification"
        SCREAMING_SNAKE_CASE__ = input_dict["input_ids"]
        SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(UpperCAmelCase__ )
        model.to(UpperCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def lowerCAmelCase__ ( self ):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(UpperCAmelCase__ , "use_cache" ):
                return
            SCREAMING_SNAKE_CASE__ = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
            if "use_cache" not in inputs:
                SCREAMING_SNAKE_CASE__ = True
            SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase__ )
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            # Resolve layer count / KV head count / head dim from the config.
            SCREAMING_SNAKE_CASE__ = (
                getattr(UpperCAmelCase__ , "decoder_layers" , UpperCAmelCase__ )
                or getattr(UpperCAmelCase__ , "num_decoder_layers" , UpperCAmelCase__ )
                or config.num_hidden_layers
            )
            SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase__ , "num_kv_heads" , config.num_attention_heads )
            SCREAMING_SNAKE_CASE__ = getattr(UpperCAmelCase__ , "d_model" , config.hidden_size )
            SCREAMING_SNAKE_CASE__ = embed_dim // num_attention_heads
            SCREAMING_SNAKE_CASE__ = outputs["past_key_values"]
            self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = inputs["input_ids"].shape
            for i in range(UpperCAmelCase__ ):
                if config.new_decoder_architecture:
                    SCREAMING_SNAKE_CASE__ = config.num_attention_heads
                elif config.multi_query:
                    SCREAMING_SNAKE_CASE__ = 1
                self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration tests: text generation with real Falcon checkpoints.

    NOTE(review): as elsewhere in this file, local assignments are mangled to a
    single placeholder name, so reads of ``tokenizer``, ``model``, ``inputs``
    and the do_sample/use_cache flag arguments are unbound as written.
    """

    @slow
    def lowerCAmelCase__ ( self ):
        # Deterministic generation on falcon-rw-1b must reproduce a pinned string.
        SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
        SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
        model.eval()
        model.to(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ )
        SCREAMING_SNAKE_CASE__ = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=19 )
        SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(UpperCAmelCase__ )[0]
        self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )

    @slow
    def lowerCAmelCase__ ( self ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
            model.eval()
            model.to(UpperCAmelCase__ )
            SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
            model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
            model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )

    @slow
    def lowerCAmelCase__ ( self ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
                model.eval()
                model.to(device=UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE__ = tokenizer("My favorite food is" , return_tensors="pt" ).to(UpperCAmelCase__ )
                # Test results are the same with and without cache
                SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ )
                SCREAMING_SNAKE_CASE__ = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 112
| 1
|
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Return the OR of two binary inputs: 1 if either input is 1, else 0.

    Restores the mangled original, in which both functions shared one name
    (the second shadowed the first), both parameters were named ``a`` (a
    syntax error), and the callers referenced the never-defined ``or_gate``.
    """
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 342
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    """Compute the (height, width) to resize *input_image* to.

    Restores the mangled original, whose locals were all assigned to a single
    placeholder (leaving ``x`` and the scale factors unbound) and whose
    parameters shared one name. Resize call sites in this file invoke it as
    ``get_resize_output_image_size``.

    Args:
        input_image: image whose size is read via ``get_image_size``.
        output_size: target size; an int means a square target.
        keep_aspect_ratio: if True, apply a single scale factor (the one
            closer to 1) to both dimensions, preserving the aspect ratio.
        multiple: both output dimensions are rounded to this multiple.

    Returns:
        ``(new_height, new_width)`` rounded to multiples of *multiple*.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then fall back to floor/ceil of the
        # adjacent multiple to stay within [min_val, max_val].
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible: use the factor closer to 1 for both axes
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class _lowercase ( __a ):
    """DPT-style image processor: multiple-constrained resize (with optional
    aspect-ratio preservation), rescale and normalize into ``pixel_values``
    batches, plus semantic-segmentation post-processing.

    NOTE(review): names are mangled — the class/base names, the
    ``_UpperCAmelCase`` attribute (presumably model_input_names) and every
    local assignment (all bound to ``snake_case``) shadow each other, so later
    reads (``size``, ``images``, ``logits`` ...) are unbound as written.
    """

    _UpperCAmelCase = ['''pixel_values''']

    def __init__( self , A__ = True , A__ = None , A__ = PILImageResampling.BILINEAR , A__ = False , A__ = 1 , A__ = True , A__ = 1 / 2_55 , A__ = True , A__ = None , A__ = None , **A__ , ) -> None:
        # Store default preprocessing configuration; size defaults to 384x384,
        # mean/std default to the ImageNet standard values.
        super().__init__(**A__ )
        snake_case = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
        snake_case = get_size_dict(A__ )
        snake_case = do_resize
        snake_case = size
        snake_case = keep_aspect_ratio
        snake_case = ensure_multiple_of
        snake_case = resample
        snake_case = do_rescale
        snake_case = rescale_factor
        snake_case = do_normalize
        snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCamelCase ( self , A__ , A__ , A__ = False , A__ = 1 , A__ = PILImageResampling.BICUBIC , A__ = None , **A__ , ) -> np.ndarray:
        # Resize one image to `size`, constrained to a multiple of `ensure_multiple_of`.
        snake_case = get_size_dict(A__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        snake_case = get_resize_output_image_size(
            A__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=A__ , multiple=A__ , )
        return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )

    def UpperCamelCase ( self , A__ , A__ , A__ = None , **A__ , ) -> Any:
        # Multiply pixel values by `scale` (e.g. 1/255).
        return rescale(A__ , scale=A__ , data_format=A__ , **A__ )

    def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , **A__ , ) -> np.ndarray:
        # Channel-wise (x - mean) / std normalization.
        return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )

    def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ) -> PIL.Image.Image:
        # Full preprocessing entry point: resolve per-call overrides against the
        # stored defaults, validate, then resize -> rescale -> normalize and
        # pack into a BatchFeature.
        snake_case = do_resize if do_resize is not None else self.do_resize
        snake_case = size if size is not None else self.size
        snake_case = get_size_dict(A__ )
        snake_case = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        snake_case = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        snake_case = resample if resample is not None else self.resample
        snake_case = do_rescale if do_rescale is not None else self.do_rescale
        snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case = do_normalize if do_normalize is not None else self.do_normalize
        snake_case = image_mean if image_mean is not None else self.image_mean
        snake_case = image_std if image_std is not None else self.image_std
        snake_case = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): precedence suspect — parses as
        # `(do_resize and size is None) or resample is None`; presumably meant
        # `do_resize and (size is None or resample is None)`. Confirm upstream.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        snake_case = [to_numpy_array(A__ ) for image in images]
        if do_resize:
            snake_case = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
        if do_rescale:
            snake_case = [self.rescale(image=A__ , scale=A__ ) for image in images]
        if do_normalize:
            snake_case = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
        snake_case = [to_channel_dimension_format(A__ , A__ ) for image in images]
        snake_case = {'''pixel_values''': images}
        return BatchFeature(data=A__ , tensor_type=A__ )

    def UpperCamelCase ( self , A__ , A__ = None ) -> Optional[int]:
        # Turn model output logits into per-image semantic segmentation maps,
        # optionally upsampling each map to the requested (height, width).
        snake_case = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A__ ) != len(A__ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(A__ ):
                snake_case = target_sizes.numpy()
            snake_case = []
            for idx in range(len(A__ ) ):
                # Upsample each logits map to its target size before the argmax.
                snake_case = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A__ )
                snake_case = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A__ )
        else:
            snake_case = logits.argmax(dim=1 )
            snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 342
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Root logging configuration for the exporter script (level via $LOGLEVEL).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
# Module logger plus model/tokenizer registries keyed by checkpoint name.
# NOTE(review): all three globals share the mangled name ``snake_case_`` and
# shadow each other; the functions below read them as ``logger`` /
# ``model_dict`` / ``tokenizer_dict`` — restore the original names.
snake_case_ : Any = logging.getLogger(__name__)
snake_case_ : Dict = {"facebook/bart-base": BartForConditionalGeneration}
snake_case_ : Optional[Any] = {"facebook/bart-base": BartTokenizer}
def parse_args() -> argparse.Namespace:
    """Build and parse the CLI arguments for the BART + beam-search ONNX exporter.

    Restores the mangled original, which assigned the parser to a placeholder
    name and then read the undefined ``parser`` / returned the undefined
    ``args``; the argument set, defaults and help texts are kept as written.
    ``main()`` in this file calls it as ``parse_args``.

    Returns:
        The parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''')
    parser.add_argument(
        '''--validation_file''',
        type=str,
        default=None,
        help='''A csv or a json file containing the validation data.''',
    )
    parser.add_argument(
        '''--max_length''',
        type=int,
        default=5,
        help='''The maximum total input sequence length after tokenization.''',
    )
    parser.add_argument(
        '''--num_beams''',
        type=int,
        default=None,
        help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=True,
    )
    parser.add_argument(
        '''--config_name''',
        type=str,
        default=None,
        help='''Pretrained config name or path if not the same as model_name''',
    )
    parser.add_argument(
        '''--device''',
        type=str,
        default='''cpu''',
        help='''Device where the model will be run''',
    )
    parser.add_argument(
        '''--output_file_path''',
        type=str,
        default=None,
        help='''Where to store the final ONNX file.''',
    )
    args = parser.parse_args()
    return args
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any]="cpu" ) -> Tuple:
    """Load the (model, tokenizer) pair registered for a checkpoint name and move
    the model to the requested device.

    NOTE(review): both parameters share one mangled name (a syntax error) —
    presumably (model_name, device) — and each assignment targets the same
    placeholder, so ``model_dict`` / ``tokenizer_dict`` (module registries
    defined above under mangled names), ``model_name`` and the returned pair
    are unbound as written. ``main()`` calls this as ``load_model_tokenizer``.
    """
    UpperCAmelCase_ : Tuple = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : List[str] = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__ )
    if model_name in ["facebook/bart-base"]:
        # BART-base specific generation defaults — presumably zeroing
        # forced/start token config before export; TODO confirm original intent.
        UpperCAmelCase_ : Optional[Any] = 0
        UpperCAmelCase_ : Any = None
        UpperCAmelCase_ : List[Any] = 0
    return huggingface_model, tokenizer
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
    """Script a BART beam-search generator, export it to ONNX, deduplicate
    initializers, then run the ONNX model and assert its output matches the
    PyTorch ``generate()`` result.

    NOTE(review): all five parameters share one mangled name (a syntax error) —
    presumably (model, tokenizer, num_beams, max_length, output_file_path) —
    and locals are mangled, so ``inputs``, ``summary_ids``, ``new_onnx_file_path``,
    ``ort_sess`` etc. are unbound as written. ``main()`` calls this as
    ``export_and_validate_model``.
    """
    model.eval()
    UpperCAmelCase_ : Optional[Any] = None
    # TorchScript the beam-search wrapper so generation is traceable for ONNX.
    UpperCAmelCase_ : Union[str, Any] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE__ ) )
    with torch.no_grad():
        UpperCAmelCase_ : Optional[Any] = '''My friends are cool but they eat too many carbs.'''
        UpperCAmelCase_ : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='''pt''' ).to(model.device )
        # Reference output from PyTorch for later comparison.
        UpperCAmelCase_ : Optional[int] = model.generate(
            inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, early_stopping=SCREAMING_SNAKE_CASE__, decoder_start_token_id=model.config.decoder_start_token_id, )
        # Export with dynamic batch/sequence axes on inputs and outputs.
        torch.onnx.export(
            SCREAMING_SNAKE_CASE__, (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), SCREAMING_SNAKE_CASE__, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            }, example_outputs=SCREAMING_SNAKE_CASE__, )
        logger.info('''Model exported to {}'''.format(SCREAMING_SNAKE_CASE__ ) )
        # Shrink the exported graph by removing duplicated initializers.
        UpperCAmelCase_ : Union[str, Any] = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE__ ) )
        logger.info('''Deduplicated and optimized model written to {}'''.format(SCREAMING_SNAKE_CASE__ ) )
        # Run the ONNX model and compare token ids against the PyTorch reference.
        UpperCAmelCase_ : Dict = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase_ : Optional[int] = ort_sess.run(
            SCREAMING_SNAKE_CASE__, {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(SCREAMING_SNAKE_CASE__ ),
                '''max_length''': np.array(SCREAMING_SNAKE_CASE__ ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3 )
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
def lowerCamelCase_ ( ) -> Optional[Any]:
    """CLI entry point: parse args, load model + tokenizer, export to ONNX and
    validate against the PyTorch output.

    NOTE(review): locals are mangled as elsewhere in this file, so ``args``,
    ``device``, ``model``, ``tokenizer``, ``max_length``, ``num_beams`` and
    ``output_name`` are unbound as written; this def and its siblings also all
    share the mangled name ``lowerCamelCase_`` while call sites use the
    original names (``parse_args`` / ``load_model_tokenizer`` /
    ``export_and_validate_model`` / ``main``).
    """
    UpperCAmelCase_ : List[Any] = parse_args()
    UpperCAmelCase_ : List[str] = 5
    UpperCAmelCase_ : Optional[Any] = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    UpperCAmelCase_ : Dict = torch.device(args.device )
    UpperCAmelCase_ : Dict = load_model_tokenizer(args.model_name_or_path, SCREAMING_SNAKE_CASE__ )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(SCREAMING_SNAKE_CASE__ )
    # CLI overrides for generation length / beam width / output path.
    if args.max_length:
        UpperCAmelCase_ : Tuple = args.max_length
    if args.num_beams:
        UpperCAmelCase_ : Optional[int] = args.num_beams
    if args.output_file_path:
        UpperCAmelCase_ : Optional[int] = args.output_file_path
    else:
        UpperCAmelCase_ : Dict = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    # NOTE(review): ``main`` is never defined under this name (the def above is mangled).
    main()
| 714
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# NOTE(review): mangled global name; presumably an ENABLE_*/skip flag for the
# fast test suite — confirm against upstream.
snake_case_ : List[str] = False


class __a (unittest.TestCase ):
    # Intentionally empty placeholder for the fast (non-nightly) suite; the
    # real VersatileDiffusion tests are in the @nightly class in this file.
    pass
@nightly
@require_torch_gpu
class __a (unittest.TestCase ):
    """Nightly GPU integration tests for the VersatileDiffusion pipeline
    (dual-guided, text-to-image and image-variation modes).

    NOTE(review): local assignments are mangled to one placeholder name
    (``UpperCAmelCase_``), so reads of ``pipe``, ``generator``, ``image``,
    ``image_slice`` etc. are unbound as written, and the ``__magic_name__``
    arguments stand in for the original values.
    """

    def UpperCAmelCase__ ( self : int ) -> str:
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        # Save/reload round-trip: dual_guided output must be identical after
        # save_pretrained + from_pretrained with the same seed.
        UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        UpperCAmelCase_ : List[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided(
            prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__magic_name__ )
            UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa )
            pipe.to(__magic_name__ )
            pipe.set_progress_bar_config(disable=__magic_name__ )
        UpperCAmelCase_ : Any = generator.manual_seed(0 )
        UpperCAmelCase_ : Dict = pipe.dual_guided(
            prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        """simple docstring"""
        # Pinned-slice regression checks for dual_guided, text_to_image and
        # image_variation at fixed seeds.
        UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077'''
        UpperCAmelCase_ : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
        UpperCAmelCase_ : Optional[Any] = pipe.dual_guided(
            prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger '''
        UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
        UpperCAmelCase_ : List[Any] = pipe.text_to_image(
            prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
        UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images
        UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 644
| 0
|
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Project Euler 2: sum the even-valued Fibonacci numbers not exceeding *n*.

    Restores the mangled original, whose locals (``fib``, ``i``, ``total``)
    were all assigned to one placeholder name and whose ``__main__`` guard
    calls the never-defined ``solution``.

    Args:
        n: inclusive upper bound for Fibonacci values to consider.

    Returns:
        Sum of all even Fibonacci numbers <= n.
    """
    total = 0
    previous, current = 0, 1
    while previous <= n:
        if previous % 2 == 0:
            total += previous
        previous, current = current, previous + current
    return total


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 314
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length bit strings that differ in at most one position.

    Restores the mangled original (duplicate parameter names were a syntax
    error; both local lists shared one name). In-file callers already use the
    name ``compare_string``.

    Args:
        string1: first bit string (may contain '_' wildcards).
        string2: second bit string of the same length.

    Returns:
        The merged string with '_' at the single differing position (or
        ``string1`` unchanged when the strings are identical), or ``False``
        when they differ in more than one position.
    """
    merged = list(string1)
    other = list(string2)
    diff_count = 0
    for i in range(len(merged)):
        if merged[i] != other[i]:
            diff_count += 1
            # Replace the differing bit with the '_' wildcard.
            merged[i] = "_"
    if diff_count > 1:
        return False
    return "".join(merged)
def lowerCamelCase ( __lowerCamelCase : list[str] ) ->list[str]:
    """Iteratively merge adjacent minterm strings to compute prime implicants.

    NOTE(review): the body is mangled — every assignment targets one
    placeholder, so ``binary``, ``checka``, ``temp``, ``k`` and ``pi`` are
    unbound as written, and this def shares the mangled name ``lowerCamelCase``
    with its siblings while the driver calls it as ``check``. Comments below
    describe the presumable intent — confirm against a reference
    implementation before restoring.
    """
    _SCREAMING_SNAKE_CASE = []
    while True:
        # Mark-array: "$" means the i-th term was not involved in a merge this round.
        _SCREAMING_SNAKE_CASE = ["""$"""] * len(__lowerCamelCase )
        _SCREAMING_SNAKE_CASE = []
        for i in range(len(__lowerCamelCase ) ):
            for j in range(i + 1 , len(__lowerCamelCase ) ):
                _SCREAMING_SNAKE_CASE = compare_string(binary[i] , binary[j] )
                if k is False:
                    _SCREAMING_SNAKE_CASE = """*"""
                    _SCREAMING_SNAKE_CASE = """*"""
                    temp.append("""X""" )
        # Terms never merged this round are prime implicants.
        for i in range(len(__lowerCamelCase ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        # Stop when no merges happened; otherwise iterate on the merged set.
        if len(__lowerCamelCase ) == 0:
            return pi
        _SCREAMING_SNAKE_CASE = list(set(__lowerCamelCase ) )
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each decimal minterm to a fixed-width binary string (MSB first).

    Restores the mangled original (locals all bound to one placeholder,
    ``string``/``temp`` unbound). Values are truncated to ``int`` first so the
    bit arithmetic is exact even though the driver parses input as ``float``.
    The driver in this file calls it as ``decimal_to_binary``.

    Args:
        no_of_variable: number of boolean variables (output string width).
        minterms: decimal minterm values.

    Returns:
        One ``no_of_variable``-character binary string per minterm.
    """
    binary_strings = []
    for minterm in minterms:
        minterm = int(minterm)  # guard: the driver parses input values as float
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        binary_strings.append(string)
    return binary_strings
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when *string1* and *string2* differ in exactly *count* positions.

    Used to decide whether a prime implicant (whose '_' wildcards each count as
    a difference) covers a minterm row in the chart. Restores the mangled
    original (duplicate parameter names were a syntax error). The chart builder
    in this file calls it as ``is_for_table``.
    """
    chars_a = list(string1)
    chars_b = list(string2)
    mismatches = 0
    for i in range(len(chars_a)):
        if chars_a[i] != chars_b[i]:
            mismatches += 1
    return mismatches == count
def lowerCamelCase ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[str] ) ->list[str]:
    """Select essential prime implicants from the coverage chart (greedy cover).

    NOTE(review): the two parameters (presumably chart, prime_implicants)
    share one mangled name — a syntax error — and locals are mangled, so
    ``chart``, ``count``, ``select``, ``temp``, ``rem``, ``max_n`` and
    ``count_n`` are unbound as written. The driver calls this as ``selection``.
    Comments below describe the presumable intent.
    """
    _SCREAMING_SNAKE_CASE = []
    _SCREAMING_SNAKE_CASE = [0] * len(__lowerCamelCase )
    # Phase 1: a column covered by exactly one implicant makes it essential.
    for i in range(len(chart[0] ) ):
        _SCREAMING_SNAKE_CASE = 0
        _SCREAMING_SNAKE_CASE = -1
        for j in range(len(__lowerCamelCase ) ):
            if chart[j][i] == 1:
                count += 1
                _SCREAMING_SNAKE_CASE = j
        if count == 1:
            _SCREAMING_SNAKE_CASE = 1
    # Record essential implicants and zero out the columns they cover.
    for i in range(len(__lowerCamelCase ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(__lowerCamelCase ) ):
                        _SCREAMING_SNAKE_CASE = 0
            temp.append(prime_implicants[i] )
    # Phase 2: greedily pick the implicant covering the most remaining columns
    # until nothing is left to cover.
    while True:
        _SCREAMING_SNAKE_CASE = 0
        _SCREAMING_SNAKE_CASE = -1
        _SCREAMING_SNAKE_CASE = 0
        for i in range(len(__lowerCamelCase ) ):
            _SCREAMING_SNAKE_CASE = chart[i].count(1 )
            if count_n > max_n:
                _SCREAMING_SNAKE_CASE = count_n
                _SCREAMING_SNAKE_CASE = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(__lowerCamelCase ) ):
                    _SCREAMING_SNAKE_CASE = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: ``chart[i][j] == 1`` iff prime implicant *i*
    covers minterm *j*.

    A prime implicant covers a minterm when the two strings differ exactly in
    the implicant's '_' wildcard positions (checked via ``is_for_table``).
    Restores the mangled original (duplicate parameter names were a syntax
    error; locals shadowed each other). The driver calls this as
    ``prime_implicant_chart``.
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        wildcard_count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], wildcard_count):
                chart[i][j] = 1
    return chart
def lowerCamelCase ( ) ->None:
    """Interactive driver: read the variable count and minterms from stdin, then
    print the prime implicants and the essential prime implicants.

    NOTE(review): locals are mangled (``no_of_variable``, ``minterms``,
    ``binary``, ``prime_implicants``, ``chart``, ``essential_prime_implicants``
    unbound as written) and the ``__main__`` guard calls the never-defined
    ``main``. Also note minterms are parsed as ``float`` here while the binary
    conversion expects integer arithmetic.
    """
    _SCREAMING_SNAKE_CASE = int(input("""Enter the no. of variables\n""" ) )
    _SCREAMING_SNAKE_CASE = [
        float(__lowerCamelCase )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    _SCREAMING_SNAKE_CASE = decimal_to_binary(__lowerCamelCase , __lowerCamelCase )
    _SCREAMING_SNAKE_CASE = check(__lowerCamelCase )
    print("""Prime Implicants are:""" )
    print(__lowerCamelCase )
    _SCREAMING_SNAKE_CASE = prime_implicant_chart(__lowerCamelCase , __lowerCamelCase )
    _SCREAMING_SNAKE_CASE = selection(__lowerCamelCase , __lowerCamelCase )
    print("""Essential Prime Implicants are:""" )
    print(__lowerCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 314
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowercase = logging.get_logger(__name__)
class _lowercase(enum.Enum):
    """Output-format selector for the generation pipeline postprocess step.

    The original defined both members under the same name, which raises
    ``TypeError: Attempted to reuse key`` when the Enum class is created.
    The member names are restored from their use sites elsewhere in the file
    (``ReturnType.TENSORS`` / ``ReturnType.TEXT``).
    """

    TENSORS = 0  # return raw token-id tensors
    TEXT = 1  # return decoded text
@add_end_docstrings(__a )
class _lowercase ( __a ):
    """Sequence-to-sequence text-generation pipeline (preprocess / forward /
    postprocess split around ``model.generate``).

    NOTE(review): this copy is identifier-mangled — locals collapsed to one
    name (``__UpperCamelCase``) so the original dict-key updates are lost,
    and several signatures repeat a single parameter name, which is a
    SyntaxError as written.  Verify against the upstream implementation.
    """

    # Prefix for the keys of returned records, e.g. "generated_text".
    lowercase__ = '''generated'''

    def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> List[Any]:
        """Initialise the pipeline and restrict it to seq2seq LM model classes."""
        super().__init__(*lowercase_ , **lowercase_ )
        # Pick the framework-appropriate model mapping for the type check.
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )

    def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : Optional[Any] , ) -> Union[str, Any]:
        """Split kwargs into preprocess / forward / postprocess parameter dicts.

        NOTE(review): repeated parameter name — SyntaxError as written; the
        body reads names (truncation, generate_kwargs, return_tensors, …)
        that the mangled signature no longer binds.
        """
        __UpperCamelCase ={}
        if truncation is not None:
            __UpperCamelCase =truncation
        __UpperCamelCase =generate_kwargs
        __UpperCamelCase ={}
        if return_tensors is not None and return_type is None:
            __UpperCamelCase =ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            __UpperCamelCase =return_type
        if clean_up_tokenization_spaces is not None:
            __UpperCamelCase =clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only single-token stop sequences are supported; warn otherwise.
            __UpperCamelCase =self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
            if len(lowercase_ ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            __UpperCamelCase =stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def UpperCAmelCase_ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Any:
        """Length-validation hook; the base pipeline accepts everything."""
        # NOTE(review): repeated parameter name — SyntaxError as written.
        return True

    def UpperCAmelCase_ ( self : Dict , *UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ) -> Tuple:
        """Prefix the input(s) with the model's configured prefix and tokenize."""
        __UpperCamelCase =self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , lowercase_ ):
            # Batch input: padding is required, so the tokenizer must have a pad token.
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            __UpperCamelCase =([prefix + arg for arg in args[0]],)
            __UpperCamelCase =True
        elif isinstance(args[0] , lowercase_ ):
            __UpperCamelCase =(prefix + args[0],)
            __UpperCamelCase =False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
        __UpperCamelCase =self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__( self : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : int ) -> Dict:
        """Run generation; unwrap singleton results for batched string inputs."""
        __UpperCamelCase =super().__call__(*lowercase_ , **lowercase_ )
        if (
            isinstance(args[0] , lowercase_ )
            and all(isinstance(lowercase_ , lowercase_ ) for el in args[0] )
            and all(len(lowercase_ ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result

    def UpperCAmelCase_ ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCamelCase__ : Dict ) -> Optional[int]:
        """Preprocess step: tokenize the raw input."""
        __UpperCamelCase =self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_ )
        return inputs

    def UpperCAmelCase_ ( self : str , UpperCamelCase__ : str , **UpperCamelCase__ : str ) -> str:
        """Forward step: run `generate` and reshape ids to (batch, candidates, ...)."""
        if self.framework == "pt":
            __UpperCamelCase , __UpperCamelCase =model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            __UpperCamelCase , __UpperCamelCase =tf.shape(model_inputs['''input_ids'''] ).numpy()
        __UpperCamelCase =generate_kwargs.get('''min_length''' , self.model.config.min_length )
        __UpperCamelCase =generate_kwargs.get('''max_length''' , self.model.config.max_length )
        self.check_inputs(lowercase_ , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
        __UpperCamelCase =self.model.generate(**lowercase_ , **lowercase_ )
        __UpperCamelCase =output_ids.shape[0]
        # Group the out_b generated sequences by their in_b source inputs.
        if self.framework == "pt":
            __UpperCamelCase =output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            __UpperCamelCase =tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}

    def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : int=ReturnType.TEXT , UpperCamelCase__ : int=False ) -> Tuple:
        """Postprocess step: decode ids to text, or pass tensors through.

        NOTE(review): repeated parameter name — SyntaxError as written.
        """
        __UpperCamelCase =[]
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                __UpperCamelCase ={f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                __UpperCamelCase ={
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                }
            records.append(lowercase_ )
        return records
@add_end_docstrings(__a )
class _lowercase ( __a ):
    """Summarization variant of the generation pipeline (keys use "summary_")."""

    lowercase__ = '''summary'''

    def __call__( self : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Dict ) -> Optional[int]:
        """Delegate to the base generation pipeline unchanged."""
        return super().__call__(*lowercase_ , **lowercase_ )

    def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> bool:
        """Warn about length settings that look wrong for summarization.

        NOTE(review): the parameter name is repeated three times — a
        SyntaxError as written; upstream this is
        ``check_inputs(input_length, min_length, max_length)``.
        """
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )

        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                f"""consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})""" )
@add_end_docstrings(__a )
class _lowercase ( __a ):
    """Translation variant of the generation pipeline (keys use "translation_")."""

    lowercase__ = '''translation'''

    def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
        """Warn when the input is close to max_length; always allow generation.

        NOTE(review): repeated parameter name — SyntaxError as written.
        """
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
        return True

    def UpperCAmelCase_ ( self : Tuple , *UpperCamelCase__ : Any , UpperCamelCase__ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[Any]=None ) -> List[str]:
        """Tokenize, preferring the tokenizer's dedicated translation builder
        when it provides ``_build_translation_inputs``."""
        if getattr(self.tokenizer , '''_build_translation_inputs''' , lowercase_ ):
            return self.tokenizer._build_translation_inputs(
                *lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ )
        else:
            return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_ )

    def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : List[Any] ) -> List[Any]:
        """Extend parameter sanitisation with src_lang / tgt_lang handling.

        NOTE(review): mangled locals — the assignments below lost their
        original dict-key targets and ``lowercase_`` / ``items`` / ``task``
        are read without being defined here.
        """
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =super()._sanitize_parameters(**lowercase_ )
        if src_lang is not None:
            __UpperCamelCase =src_lang
        if tgt_lang is not None:
            __UpperCamelCase =tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            __UpperCamelCase =kwargs.get('''task''' , self.task )
            __UpperCamelCase =task.split('''_''' )
            if task and len(lowercase_ ) == 4:
                # translation, XX, to YY
                __UpperCamelCase =items[1]
                __UpperCamelCase =items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str ) -> Union[str, Any]:
        """Delegate to the base generation pipeline unchanged."""
        return super().__call__(*lowercase_ , **lowercase_ )
| 716
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
    """Tokenizer test-suite specialisation for ``MgpstrTokenizer``.

    NOTE(review): locals collapsed to one mangled name (``__UpperCamelCase``),
    so e.g. ``UpperCamelCase__`` is read in ``setUp`` before being defined —
    verify against the upstream test file.
    """

    lowercase__ = MgpstrTokenizer
    lowercase__ = False
    lowercase__ = {}
    lowercase__ = False

    def UpperCAmelCase_ ( self : int ) -> Tuple:
        """Write a small character vocabulary to a temp dir for the tests."""
        super().setUp()

        # fmt: off
        __UpperCamelCase =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        __UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )

        __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )

    def UpperCAmelCase_ ( self : str , **UpperCamelCase__ : str ) -> Optional[int]:
        """Instantiate a tokenizer from the temp-dir vocabulary."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )

    def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : int ) -> Dict:
        """Return a matching (input, expected output) text pair."""
        __UpperCamelCase ='''tester'''
        __UpperCamelCase ='''tester'''
        return input_text, output_text

    @unittest.skip('''MGP-STR always lower cases letters.''' )
    def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
        """Skipped: case-sensitivity test does not apply to this tokenizer."""
        pass

    def UpperCAmelCase_ ( self : Tuple ) -> Dict:
        """An added special token must encode to one id and not survive decoding."""
        __UpperCamelCase =self.get_tokenizers(do_lower_case=UpperCamelCase__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                __UpperCamelCase ='''[SPECIAL_TOKEN]'''

                tokenizer.add_special_tokens({'''cls_token''': special_token} )
                __UpperCamelCase =tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase__ )
                self.assertEqual(len(UpperCamelCase__ ) , 1 )

                __UpperCamelCase =tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
                self.assertTrue(special_token not in decoded )

    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        """tokenize / convert / encode / decode must round-trip consistently."""
        __UpperCamelCase =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                __UpperCamelCase , __UpperCamelCase =self.get_input_output_texts(UpperCamelCase__ )

                __UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ )
                __UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
                __UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
                self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

                __UpperCamelCase =tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
                self.assertNotEqual(len(UpperCamelCase__ ) , 0 )
                __UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
                self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

                self.assertEqual(text_a.replace(''' ''' , '''''' ) , UpperCamelCase__ )

    @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
        """Skipped: pair-sequence test does not apply."""
        pass

    @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
    def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
        """Skipped: pretokenized-input test does not apply."""
        pass
| 296
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Fast CPU tests for the unconditional latent-diffusion (LDM) pipeline.

    NOTE(review): ``__UpperCAmelCase`` is referenced but never defined
    (mangled device / disable-flag / config arguments), and the three tiny
    component properties all share one mangled name — verify against the
    upstream test file.
    """

    @property
    def __lowerCamelCase ( self ) -> Dict:
        """Tiny, seeded UNet for the fast test."""
        torch.manual_seed(0 )
        __UpperCamelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    @property
    def __lowerCamelCase ( self ) -> List[str]:
        """Tiny, seeded VQ-VAE for the fast test."""
        torch.manual_seed(0 )
        __UpperCamelCase : str = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
        return model

    @property
    def __lowerCamelCase ( self ) -> List[Any]:
        """Tiny, seeded CLIP text encoder."""
        torch.manual_seed(0 )
        __UpperCamelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(__UpperCAmelCase )

    def __lowerCamelCase ( self ) -> int:
        """Two-step inference must match a known slice via both return paths."""
        __UpperCamelCase : Tuple = self.dummy_uncond_unet
        __UpperCamelCase : Optional[int] = DDIMScheduler()
        __UpperCamelCase : List[str] = self.dummy_vq_model

        __UpperCamelCase : Dict = LDMPipeline(unet=__UpperCAmelCase , vqvae=__UpperCAmelCase , scheduler=__UpperCAmelCase )
        ldm.to(__UpperCAmelCase )
        ldm.set_progress_bar_config(disable=__UpperCAmelCase )

        __UpperCamelCase : Tuple = torch.manual_seed(0 )

        __UpperCamelCase : Union[str, Any] = ldm(generator=__UpperCAmelCase , num_inference_steps=2 , output_type="numpy" ).images

        __UpperCamelCase : List[Any] = torch.manual_seed(0 )

        __UpperCamelCase : Any = ldm(generator=__UpperCAmelCase , num_inference_steps=2 , output_type="numpy" , return_dict=__UpperCAmelCase )[0]

        __UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
        __UpperCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        __UpperCamelCase : List[Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        # MPS needs a looser tolerance than CPU/CUDA.
        __UpperCamelCase : Optional[int] = 1E-2 if torch_device != "mps" else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: the pretrained CelebA-HQ LDM checkpoint must
    reproduce a known output slice.

    NOTE(review): ``__UpperCAmelCase`` is referenced but never defined — the
    device / disable-flag / generator arguments were mangled.
    """

    def __lowerCamelCase ( self ) -> List[str]:
        """Five-step inference on the pretrained checkpoint."""
        __UpperCamelCase : Optional[Any] = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
        ldm.to(__UpperCAmelCase )
        ldm.set_progress_bar_config(disable=__UpperCAmelCase )

        __UpperCamelCase : Optional[Any] = torch.manual_seed(0 )

        __UpperCamelCase : Any = ldm(generator=__UpperCAmelCase , num_inference_steps=5 , output_type="numpy" ).images

        __UpperCamelCase : int = image[0, -3:, -3:, -1]

        assert image.shape == (1, 2_56, 2_56, 3)
        __UpperCamelCase : Union[str, Any] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        __UpperCamelCase : Optional[Any] = 1E-2 if torch_device != "mps" else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 327
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( SCREAMING_SNAKE_CASE ):
    """
    Processor that wraps a LayoutLMv3 image processor and a LayoutLMv3
    tokenizer into a single callable producing model-ready inputs.

    The original block did not parse: ``__init__`` and ``__call__`` repeated
    one mangled parameter name (SyntaxError), every local was bound to a
    single name, and all helper methods were named ``_A``.  Names are
    reconstructed from their own use sites (``features["words"]``,
    ``self.get_overflowing_images``, ``images_with_overflow``, the keyword
    list of the tokenizer call) and ProcessorMixin conventions.
    """

    # ProcessorMixin class-level configuration (names per the mixin contract).
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Accept the deprecated ``feature_extractor`` kwarg, then validate
        that both an image processor and a tokenizer were provided."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text=None , text_pair=None , boxes=None , word_labels=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        """Run the image processor (optionally with OCR), then the tokenizer,
        and return one encoding that also carries ``pixel_values``.

        Raises:
            ValueError: if ``boxes`` / ``word_labels`` are supplied while the
                image processor performs OCR itself (``apply_ocr=True``).
        """
        # verify input: OCR mode produces its own boxes/words.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values, duplicated per overflowing chunk when needed
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """Repeat each image once per overflowing chunk the tokenizer produced."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )

        return images_with_overflow

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Names of the tensors this processor produces."""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 191
| 0
|
"""simple docstring"""
def lowercase (_lowerCAmelCase ):
    """Compute the Z-array of the given string.

    z[i] is the length of the longest substring starting at i that is also a
    prefix of the string (z[0] is left at 0 by this implementation's
    convention).

    The original body read window locals that were bound to a mangled name
    (NameError) and called ``go_next``, which is not defined under that name
    in this file; the helper's two-term condition is inlined instead.

    >>> lowercase("abacaba")
    [0, 0, 1, 0, 3, 0, 1]
    """
    s = _lowerCAmelCase
    z_result = [0 for _ in range(len(s ) )]
    # [left_pointer, right_pointer] is the right-most known prefix-match window.
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(s ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            # Reuse the already-known match length from inside the window.
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge

        # Extend the match character by character (inlined go_next condition).
        while i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]:
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def lowercase (i , z_result , s ):
    """Return True while the prefix match of length z_result[i] at position i
    of string s can be extended by one more character.

    The original repeated one parameter name for all three arguments (a
    SyntaxError); the names are recovered from the function body.
    """
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def lowercase (pattern , input_str ):
    """Count occurrences of `pattern` in `input_str` using the Z-function.

    The original duplicated one parameter name (a SyntaxError); the names
    are recovered from the body.  NOTE(review): ``z_function`` is not defined
    under that name in this file as shown — verify the helper's actual name.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
# Run the embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 573
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase (_lowerCAmelCase ):
    """Return the Möbius function value of the argument.

    0 when the number is not square-free; otherwise +1 or -1 according to
    the parity of its number of prime factors.  The original took ``len()``
    of the input number itself (a TypeError) because the factor list was
    bound to a mangled local; fixed to measure the factor list.
    """
    factors = prime_factors(_lowerCAmelCase )
    if is_square_free(_lowerCAmelCase ):
        # Odd factor count -> -1, even -> +1.
        return -1 if len(factors ) % 2 else 1
    return 0
# Run the embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 573
| 1
|
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = abs(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[str] = abs(UpperCAmelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
return sum(int(UpperCAmelCase ) for c in str(abs(UpperCAmelCase ) ) )
def _a ( ) -> None:
    """Benchmark the three digit-sum implementations with ``timeit``.

    NOTE(review): the names timed here (``sum_of_digits`` etc.), the inner
    locals ``func``/``value``/``call``/``timing``, and the duplicated inner
    parameter name (a SyntaxError) do not line up with this file as shown —
    locals were mangled; verify against the upstream source before running.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(UpperCAmelCase , UpperCAmelCase ) -> None:
        # Build e.g. "sum_of_digits(262144)" and time it against __main__.
        lowerCamelCase__ : Optional[int] = f"{func.__name__}({value})"
        lowerCamelCase__ : Optional[Any] = timeit(f"__main__.{call}" , setup='''import __main__''' )
        print(f"{call:56} = {func(UpperCAmelCase )} -- {timing:.4f} seconds" )

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(UpperCAmelCase , UpperCAmelCase )
        print()
# Run the doctests, then the timing comparison.
# NOTE(review): `benchmark` is not defined in this file as shown (the
# benchmark driver above is named `_a`) — verify the entry-point name.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 315
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Helper that builds tiny ViT configs and inputs for the Flax model tests.

    NOTE(review): ``__init__`` repeats the parameter name ``A`` sixteen times
    (a SyntaxError as written) and every attribute assignment reads a name
    the mangled signature no longer binds; the intended keyword names can be
    read from the attribute assignments below.  The annotated tuple-unpack in
    the last method is also invalid syntax.  Verify against upstream.
    """

    def __init__( self : str , A : int , A : Tuple=1_3 , A : List[str]=3_0 , A : Any=2 , A : List[Any]=3 , A : Dict=True , A : Tuple=True , A : Optional[int]=3_2 , A : List[Any]=5 , A : Any=4 , A : Optional[int]=3_7 , A : Union[str, Any]="gelu" , A : Optional[int]=0.1 , A : Optional[int]=0.1 , A : Optional[int]=1_0 , A : Optional[int]=0.02 , ) ->Optional[int]:
        lowerCamelCase__ : Any = parent
        lowerCamelCase__ : Any = batch_size
        lowerCamelCase__ : str = image_size
        lowerCamelCase__ : Any = patch_size
        lowerCamelCase__ : Dict = num_channels
        lowerCamelCase__ : List[Any] = is_training
        lowerCamelCase__ : str = use_labels
        lowerCamelCase__ : str = hidden_size
        lowerCamelCase__ : Optional[Any] = num_hidden_layers
        lowerCamelCase__ : Any = num_attention_heads
        lowerCamelCase__ : int = intermediate_size
        lowerCamelCase__ : Optional[Any] = hidden_act
        lowerCamelCase__ : Tuple = hidden_dropout_prob
        lowerCamelCase__ : int = attention_probs_dropout_prob
        lowerCamelCase__ : int = type_sequence_label_size
        lowerCamelCase__ : Optional[Any] = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Optional[Any] = (image_size // patch_size) ** 2
        lowerCamelCase__ : Optional[Any] = num_patches + 1

    def __lowerCamelCase ( self : str ) ->Any:
        """Build a tiny ViT config plus a random pixel-values tensor."""
        lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCamelCase__ : int = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )

        return config, pixel_values

    def __lowerCamelCase ( self : Tuple , A : List[Any] , A : Optional[int] ) ->int:
        """Check the base model's output shape.

        NOTE(review): repeated parameter name ``A`` — SyntaxError as written.
        """
        lowerCamelCase__ : Dict = FlaxViTModel(config=A )
        lowerCamelCase__ : Optional[int] = model(A )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Optional[Any] = (self.image_size, self.image_size)
        lowerCamelCase__ : List[str] = (self.patch_size, self.patch_size)
        lowerCamelCase__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )

    def __lowerCamelCase ( self : Optional[Any] , A : int , A : Optional[int] ) ->Optional[int]:
        """Check the classification head, including a greyscale input.

        NOTE(review): repeated parameter name ``A`` — SyntaxError as written.
        """
        lowerCamelCase__ : Optional[int] = self.type_sequence_label_size
        lowerCamelCase__ : Optional[Any] = FlaxViTForImageClassification(config=A )
        lowerCamelCase__ : int = model(A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        lowerCamelCase__ : Union[str, Any] = 1
        lowerCamelCase__ : int = FlaxViTForImageClassification(A )

        lowerCamelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : Optional[Any] = model(A )

    def __lowerCamelCase ( self : int ) ->str:
        """Pack config and inputs into the format the common tests expect."""
        lowerCamelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : Optional[Any] = config_and_inputs
        lowerCamelCase__ : Any = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Flax ViT model test-suite (config sanity, forward passes, jit parity).

    NOTE(review): locals are mangled to ``lowerCamelCase__``, ``A`` is read
    without being defined, and the inner ``model_jitted`` repeats the
    parameter name ``A`` (a SyntaxError as written) — verify upstream.
    """

    # Model classes exercised by the common tests (empty when flax is absent).
    _UpperCAmelCase : Union[str, Any] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def __lowerCamelCase ( self : Optional[Any] ) ->None:
        """Create the shared model tester and config tester."""
        lowerCamelCase__ : int = FlaxViTModelTester(self )
        lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )

    def __lowerCamelCase ( self : Any ) ->Dict:
        """Run the generic config checks."""
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self : str ) ->List[Any]:
        """Forward pass of the base model."""
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def __lowerCamelCase ( self : Any ) ->Union[str, Any]:
        """Forward pass of the classification head."""
        lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )

    def __lowerCamelCase ( self : int ) ->int:
        """Model __call__ must expose `pixel_values` as its first argument."""
        lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ : int = model_class(A )
            lowerCamelCase__ : Any = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]

            lowerCamelCase__ : Union[str, Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A )

    def __lowerCamelCase ( self : int ) ->List[str]:
        """Jitted and non-jitted forward passes must agree in output shapes."""
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase__ : List[str] = self._prepare_for_class(A , A )
                lowerCamelCase__ : int = model_class(A )

                @jax.jit
                def model_jitted(A : Union[str, Any] , **A : Union[str, Any] ):
                    return model(pixel_values=A , **A )

                with self.subTest('''JIT Enabled''' ):
                    lowerCamelCase__ : Union[str, Any] = model_jitted(**A ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        lowerCamelCase__ : Optional[Any] = model_jitted(**A ).to_tuple()

                self.assertEqual(len(A ) , len(A ) )
                for jitted_output, output in zip(A , A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def __lowerCamelCase ( self : Any ) ->Tuple:
        """Each pretrained checkpoint must load and produce an output."""
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : List[str] = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
            lowerCamelCase__ : Dict = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(A )
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue_model_parallelism.py',
            'model_name_or_path': 'roberta-large',
            'instance_type': 'ml.p3dn.24xlarge',
            'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
        },
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'roberta-large',
            'instance_type': 'ml.p3dn.24xlarge',
            'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
        },
    ] )
class snake_case__ ( unittest.TestCase ):
    """SageMaker model-parallel GLUE training smoke test, parameterized per script.

    NOTE(review): several names (``instance_count``, ``name_extension``,
    ``job_name``, ``train_runtime``, ``self.create_estimator``) are read but
    never defined here — locals and method names were mangled to
    ``snake_case__`` / ``UpperCAmelCase__`` — verify against upstream.
    """

    def UpperCAmelCase__ ( self : List[Any] ):
        """Copy the GLUE training script into the SageMaker test workspace."""
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=_lowerCamelCase , )
        assert hasattr(self , 'env' )

    def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : Dict ):
        """Build a HuggingFace estimator configured for smdistributed model parallelism."""
        # configuration for running training on smdistributed Model Parallel
        snake_case__ : Tuple = {
            'enabled': True,
            'processes_per_host': 8,
        }
        snake_case__ : Any = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        snake_case__ : Union[str, Any] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}

        snake_case__ : Tuple = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=_lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=_lowerCamelCase , hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 5_0_0,
            } , metric_definitions=self.env.metric_definitions , distribution=_lowerCamelCase , py_version='py36' , )

    def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : List[Any] ):
        """Export the training job's CloudWatch metrics to a CSV file."""
        TrainingJobAnalytics(_lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )

    @parameterized.expand([(1,)] )
    def UpperCAmelCase__ ( self : Dict , _lowerCamelCase : List[str] ):
        """Train once and assert runtime / accuracy / loss against thresholds."""
        # create estimator
        snake_case__ : Tuple = self.create_estimator(_lowerCamelCase )

        # run training
        estimator.fit()

        # result dataframe
        snake_case__ : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        snake_case__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        snake_case__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        snake_case__ : List[Any] = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _lowerCamelCase )
| 303
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase__(idx):
    """Return (HF name, original name) rename pairs for the patch-embedding weights of stage ``idx``.

    Bug fix: the parameter was named ``A`` while the body referenced the
    undefined name ``idx``, raising NameError on every call.
    """
    embed = []
    # Patch-embedding projection conv + its normalization, for one stage.
    for hf_suffix, orig_suffix in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            )
        )
    return embed


# Name the converter below uses to call this helper.
embeddings = lowercase__
def lowercase__(idx, cnt):
    """Return (HF name, original name) rename pairs for attention block ``cnt`` of stage ``idx``.

    Bug fix: both parameters were named ``A`` — a SyntaxError (duplicate
    argument) — while the body used ``idx``/``cnt``; the repetitive append
    sequence is generated from tables, preserving the original pair order.
    """
    attention_weights = []
    hf_attn = f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention"
    orig_attn = f"stage{idx}.blocks.{cnt}.attn"

    # Convolutional q/k/v projections: conv weight, then the BatchNorm tensors.
    bn_parts = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    for hf_name, orig_name in (("query", "q"), ("key", "k"), ("value", "v")):
        proj_hf = f"{hf_attn}.convolution_projection_{hf_name}.convolution_projection"
        proj_orig = f"{orig_attn}.conv_proj_{orig_name}"
        attention_weights.append((f"{proj_hf}.convolution.weight", f"{proj_orig}.conv.weight"))
        for part in bn_parts:
            attention_weights.append((f"{proj_hf}.normalization.{part}", f"{proj_orig}.bn.{part}"))

    # Linear q/k/v projections.
    for hf_name, orig_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for part in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_attn}.projection_{hf_name}.{part}", f"{orig_attn}.proj_{orig_name}.{part}")
            )

    # Attention output projection, MLP, and the two layer norms.
    layer_hf = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    block_orig = f"stage{idx}.blocks.{cnt}"
    for hf_suffix, orig_suffix in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for part in ("weight", "bias"):
            attention_weights.append((f"{layer_hf}.{hf_suffix}.{part}", f"{block_orig}.{orig_suffix}.{part}"))
    return attention_weights


# Name the converter below uses to call this helper.
attention = lowercase__
def lowercase__(idx):
    """Return the (HF, original) rename pair for the stage-``idx`` classification token.

    The original checkpoint key is hard-coded to ``stage2.cls_token``; the
    converter below only calls this when ``config.cls_token[idx]`` is set.

    Bug fix: the parameter was named ``A`` while the body used the undefined ``idx``.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


# Name the converter below uses to call this helper.
cls_token = lowercase__
def lowercase__():
    """Return (HF name, original name) rename pairs for the final layernorm and classifier head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


# Name the converter below uses to call this helper.
final = lowercase__
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original Microsoft CvT checkpoint to the HF Transformers format.

    Fixes vs. the previous revision: the four parameters were all named ``A``
    (a SyntaxError), the dict comprehension used ``int(A)`` instead of
    ``int(k)``, and the depth/head/embedding choices were bound to a throwaway
    name instead of being written onto ``config``. The function is defined
    under the name the ``__main__`` block below calls, with the old binding
    kept as an alias.

    Args:
        cvt_model: model name such as ``cvt-13``/``cvt-21``/``cvt-w24`` — the
            depth configuration is derived from characters 4:6 of the last path
            segment.
        image_size: input image size to record on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the previous revision bound `image_size` to a dead local;
    # recording it on the processor's size dict — confirm the intended key.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Remap every original tensor onto its Hugging Face key.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Backward-compatible alias for the previous (mangled) public name.
lowercase__ = convert_cvt_checkpoint
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Bug fix: the parser and the parsed args were bound to `lowerCamelCase`
    # while the following lines read the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        # Bug fix: the help text was a copy-paste of "Input Image Size".
        help="Path to the original checkpoint file to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 303
| 1
|
def UpperCamelCase(number, number_of_terms) -> str:
    """Return the multiplication table of ``number`` up to ``number_of_terms``, one product per line.

    Bug fix: both parameters were named ``lowercase_`` — a SyntaxError
    (duplicate argument) — while the body used ``number``/``number_of_terms``.
    """
    return "\n".join(
        f'{number} * {i} = {number * i}' for i in range(1, number_of_terms + 1)
    )


# Name used by the `__main__` block below.
multiplication_table = UpperCamelCase
# Demo entry point: print the 5-times table with 10 terms.
# NOTE(review): `multiplication_table` is not defined under that name in this
# file (the function above is `UpperCamelCase`) — confirm the intended binding.
if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=1_0))
| 12
|
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # Bug fix: the value was only bound to a variable; it must be exported via
    # the environment for JAX to pick it up.
    _lowerCAmelCase = "platform"
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = _lowerCAmelCase

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def UpperCamelCase(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the Blenderbot model input dict, deriving any mask not supplied.

    Attention masks are derived from the pad token; head masks default to
    all-ones for every layer/head.

    Fixes vs. the previous revision: every parameter was named ``a`` (a
    SyntaxError) while the body used the names restored here, and the
    ``-> Tuple`` annotation referenced an undefined name (the function returns
    a dict).
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder mask is reused as the decoder mask here, as
        # in the original — confirm this is intentional.
        "decoder_attention_mask": attention_mask,
    }


# Name the model tester below uses to call this helper.
prepare_blenderbot_inputs_dict = UpperCamelCase
class FlaxBlenderbotModelTester:
    """Helper that builds tiny Blenderbot configs/inputs and checks cached decoding.

    Renamed from the mangled ``_SCREAMING_SNAKE_CASE`` so that the test class
    below can resolve ``FlaxBlenderbotModelTester(self)``; attributes, locals
    and method names are restored from their use/call sites (the previous
    revision bound everything to ``__magic_name__``, leaving the bodies full of
    NameErrors).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        # Random ids in [3, vocab) with an eos token (=2) appended to every row.
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached single-step decoding must match the uncached full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but the provided decoder attention mask is honoured."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the mask out to the cache length with zeros.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Standalone checks of the LM head shapes and ``shift_tokens_right``.

    Renamed from the mangled ``_SCREAMING_SNAKE_CASE``: three test classes
    shared that name, so only the last one was ever visible to the unittest
    collector. The class attribute is restored to ``vocab_size`` (read via
    ``self.vocab_size`` below), ``np.intaa`` to the real numpy dtypes, and the
    locals to the names the assertions reference.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_forward_with_decoder_inputs(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # Shifting consumes exactly one pad: the last column drops off and the
        # decoder-start token (2) is prepended.
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Model-level tests; mixes in the common Flax model/generation test suites.

    Fixes vs. the previous revision: the base classes were the undefined name
    ``__a`` (the mixins are imported at the top of this file), all class-level
    attributes shared one mangled name (so only the last assignment survived),
    every test method shared one mangled name, and locals were bound to
    ``__magic_name__`` while later lines read the real names.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        # NOTE(review): Flax `generate` returns an output object; decoding its
        # `.sequences` — confirm against the installed transformers version.
        generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 432
| 0
|
import os
import sys
import unittest


# Locate the repository root so the `utils` helper scripts can be imported.
# Bug fix: the path was bound to the throwaway name ``a__`` while the lines
# below read the undefined ``git_repo_path``.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the previous revision bound this path to an unused name; it is
# now written into the helper module — confirm the attribute name it expects.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, '''src''', '''diffusers''')
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the `check_dummies` helper script.

    Fixes vs. the previous revision: all three test methods shared one mangled
    name (only the last was collected), and several results were bound to
    ``_a`` while the assertions read the undefined names ``objects`` /
    ``dummy_files``. String literals are preserved byte-for-byte.
    """

    def test_find_backend(self):
        simple_backend = find_backend(''' if not is_torch_available():''' )
        self.assertEqual(simple_backend , '''torch''' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
        self.assertEqual(double_backend , '''torch_and_transformers''' )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
        self.assertEqual(triple_backend , '''torch_and_transformers_and_onnx''' )

    def test_read_init(self):
        # Bug fix: the result was stored in ``_a`` but read as ``objects``.
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('''torch''' , objects )
        self.assertIn('''torch_and_transformers''' , objects )
        self.assertIn('''flax_and_transformers''' , objects )
        self.assertIn('''torch_and_transformers_and_onnx''' , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('''UNet2DModel''' , objects['''torch'''] )
        self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
        self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
        self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
        self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
        self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
        self.assertEqual(dummy_constant , '''\nCONSTANT = None\n''' )

        dummy_function = create_dummy_object('''function''' , '''\'torch\'''' )
        self.assertEqual(
            dummy_function , '''\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n''' )

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
'''
        # Bug fix: the result was stored in ``_a`` but read as ``dummy_files``.
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file )
| 578
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
# Type variables for the generic hash map below.
# Bug fix: both TypeVars were bound to the same throwaway name ``a__`` while
# the annotations below reference ``KEY`` and ``VAL``.
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True, slots=True)
class UpperCAmelCase_ ( Generic[KEY, VAL] ):
    """Immutable key/value entry stored in a hash-map bucket.

    Fixes vs. the previous revision: ``frozen``/``slots`` were set to the
    undefined name ``__lowercase`` (restored to ``True``), and both fields
    shared one mangled name while the map below reads ``item.key`` /
    ``item.val``.
    """

    # key/val names are required by the hash map's `stored.key`, `item.val` accesses.
    key: KEY
    val: VAL


# The hash map below refers to this record as ``_Item``.
_Item = UpperCAmelCase_
class UpperCAmelCase_ ( _Item ):
    """Tombstone sentinel marking a deleted bucket; tests falsy so probing continues.

    Fixes vs. the previous revision: ``super().__init__`` was called with the
    undefined name ``_a`` twice; the item record takes (key, val), so the
    sentinel stores ``None`` for both.
    """

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        # A deleted slot must be falsy, unlike every live item.
        return False


# Name used when instantiating the shared sentinel below.
_DeletedItem = UpperCAmelCase_
# Shared tombstone instance; the hash map compares buckets with ``is _deleted``.
# (``a__`` kept for backward compatibility with the previous binding.)
a__ = _deleted = _DeletedItem()
class UpperCAmelCase_ ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , _a = 8 , _a = 0.75 ) -> None:
_a : Optional[Any] = initial_block_size
_a : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_a : Tuple = capacity_factor
_a : Optional[Any] = 0
def __lowercase ( self , _a ) -> int:
return hash(_a ) % len(self._buckets )
def __lowercase ( self , _a ) -> int:
return (ind + 1) % len(self._buckets )
def __lowercase ( self , _a , _a , _a ) -> bool:
_a : Optional[Any] = self._buckets[ind]
if not stored:
_a : List[Any] = _Item(_a , _a )
self._len += 1
return True
elif stored.key == key:
_a : int = _Item(_a , _a )
return True
else:
return False
def __lowercase ( self ) -> bool:
_a : List[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_a )
def __lowercase ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_a : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __lowercase ( self , _a ) -> None:
_a : Any = self._buckets
_a : str = [None] * new_size
_a : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __lowercase ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __lowercase ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __lowercase ( self , _a ) -> Iterator[int]:
_a : str = self._get_bucket_index(_a )
for _ in range(len(self._buckets ) ):
yield ind
_a : List[Any] = self._get_next_ind(_a )
def __lowercase ( self , _a , _a ) -> None:
for ind in self._iterate_buckets(_a ):
if self._try_set(_a , _a , _a ):
break
def __setitem__( self , _a , _a ) -> None:
if self._is_full():
self._size_up()
self._add_item(_a , _a )
def __delitem__( self , _a ) -> None:
for ind in self._iterate_buckets(_a ):
_a : List[str] = self._buckets[ind]
if item is None:
raise KeyError(_a )
if item is _deleted:
continue
if item.key == key:
_a : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , _a ) -> VAL:
for ind in self._iterate_buckets(_a ):
_a : int = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_a )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
        # Yield keys of live slots; empty (None) and falsy tombstone slots are
        # filtered by truthiness.
        yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
        """Debug representation: HashMap(k1: v1 ,k2: v2 ...).

        FIX(review): the obfuscated original joined into a throwaway name and
        then interpolated the undefined `val_string`.
        """
        val_string = ''' ,'''.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
        return F"""HashMap({val_string})"""
| 578
| 1
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowercase__( tokenizer ,line ,max_length ,padding_side ,pad_to_max_length=True ,return_tensors="pt" ):
    """Tokenize a single text line, optionally padding/truncating to max_length.

    FIX(review): the obfuscated original declared six parameters all named
    `__UpperCamelCase` (a SyntaxError) and dropped the assignment target of
    `tokenizer.padding_side`; both restored here (matches upstream
    `utils_rag.encode_line`). `add_prefix_space` only applies to the BART
    byte-level tokenizer when the line does not already start with a space.
    """
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer ,BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] ,max_length=max_length ,padding='max_length' if pad_to_max_length else None ,truncation=True ,return_tensors=return_tensors ,add_special_tokens=True ,**extra_kw ,)
def lowercase__( input_ids ,pad_token_id ,attention_mask=None ,):
    """Remove columns (token positions) that are padding in every row.

    FIX(review): the obfuscated original declared all three parameters as
    `__UpperCamelCase` (a SyntaxError); names restored from the body
    (matches upstream `utils_rag.trim_batch`).

    Returns the trimmed `input_ids`, or an (input_ids, attention_mask) tuple
    when a mask is supplied.
    """
    # column is kept if any row has a non-pad token there
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _a ( SCREAMING_SNAKE_CASE ):
    """Line-aligned seq2seq dataset reading `<type_path>.source` / `.target`.

    NOTE(review): heavily machine-garbled. Every `__init__`/`__call__`
    parameter is named `A` (duplicate parameters are a SyntaxError), every
    local was renamed to `SCREAMING_SNAKE_CASE` while the bodies still read
    the ORIGINAL local names (`type_path`, `n_obs`, `index`, `batch`, ...),
    and the base class name is unresolved. The original upstream source
    (transformers `examples/research_projects/rag/utils_rag.Seq2SeqDataset`)
    must be restored; code below is kept byte-identical for review.
    """
    def __init__( self, A, A, A, A, A="train", A=None, A=None, A=None, A="", ):
        """Presumably (tokenizer, data_dir, max_source_length,
        max_target_length, type_path, n_obs, src_lang, tgt_lang, prefix) —
        TODO confirm against upstream; duplicate `A` params hide the order."""
        super().__init__()
        SCREAMING_SNAKE_CASE : Optional[Any] = Path(A ).joinpath(type_path + '.source' )
        SCREAMING_SNAKE_CASE : int = Path(A ).joinpath(type_path + '.target' )
        SCREAMING_SNAKE_CASE : Optional[int] = self.get_char_lens(self.src_file )
        SCREAMING_SNAKE_CASE : Any = max_source_length
        SCREAMING_SNAKE_CASE : str = max_target_length
        assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
        SCREAMING_SNAKE_CASE : Any = tokenizer
        SCREAMING_SNAKE_CASE : Optional[Any] = prefix
        if n_obs is not None:
            SCREAMING_SNAKE_CASE : Any = self.src_lens[:n_obs]
        SCREAMING_SNAKE_CASE : Optional[int] = src_lang
        SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
    def __len__( self ):
        """Number of (possibly truncated to n_obs) examples."""
        return len(self.src_lens )
    def __getitem__( self, A ):
        """Read the index-th source/target line pair and tokenize both."""
        SCREAMING_SNAKE_CASE : str = index + 1 # linecache starts at 1
        SCREAMING_SNAKE_CASE : List[str] = self.prefix + linecache.getline(str(self.src_file ), A ).rstrip('\n' )
        SCREAMING_SNAKE_CASE : Tuple = linecache.getline(str(self.tgt_file ), A ).rstrip('\n' )
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, A ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        SCREAMING_SNAKE_CASE : Optional[Any] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, A ) else self.tokenizer
        )
        SCREAMING_SNAKE_CASE : str = self.tokenizer.generator if isinstance(self.tokenizer, A ) else self.tokenizer
        SCREAMING_SNAKE_CASE : int = encode_line(A, A, self.max_source_length, 'right' )
        SCREAMING_SNAKE_CASE : List[str] = encode_line(A, A, self.max_target_length, 'right' )
        SCREAMING_SNAKE_CASE : Tuple = source_inputs['input_ids'].squeeze()
        SCREAMING_SNAKE_CASE : Dict = target_inputs['input_ids'].squeeze()
        SCREAMING_SNAKE_CASE : Optional[Any] = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def UpperCamelCase_ ( A ):
        """Per-line character lengths of a file (originally `get_char_lens`)."""
        return [len(A ) for x in Path(A ).open().readlines()]
    def UpperCamelCase_ ( self, A ):
        """Collate: stack example dicts, trim all-pad columns (originally
        `collate_fn`)."""
        SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack([x['input_ids'] for x in batch] )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([x['attention_mask'] for x in batch] )
        SCREAMING_SNAKE_CASE : Dict = torch.stack([x['decoder_input_ids'] for x in batch] )
        SCREAMING_SNAKE_CASE : int = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, A )
            else self.tokenizer.pad_token_id
        )
        SCREAMING_SNAKE_CASE : Tuple = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, A )
            else self.tokenizer.pad_token_id
        )
        SCREAMING_SNAKE_CASE : Dict = trim_batch(A, A )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = trim_batch(A, A, attention_mask=A )
        SCREAMING_SNAKE_CASE : List[str] = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
# Module logger. NOTE(review): obfuscation renamed this from `logger`, but
# call sites below (e.g. `logger.info(...)`) still use `logger` — restore.
UpperCamelCase_ = getLogger(__name__)
def lowercase__( __UpperCamelCase: List[List] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(__UpperCamelCase ) )
def lowercase__( __UpperCamelCase: str ):
    """Write current git checkout metadata to <folder>/git_log.json.

    FIX(review): the obfuscated original bound the git info to a throwaway
    name and then passed the folder path as the JSON content; restored so the
    repo info is what gets saved. NOTE(review): `get_git_info`/`save_json`
    were themselves renamed to `lowercase__` elsewhere in this file and must
    be restored at file level.
    """
    repo_infos = get_git_info()
    save_json(repo_infos ,os.path.join(__UpperCamelCase ,'git_log.json' ) )
def lowercase__( content ,path ,indent=4 ,**json_dump_kwargs ):
    """Serialize `content` as JSON to `path`.

    FIX(review): the obfuscated original declared all parameters (including
    **kwargs) as `__UpperCamelCase` — a SyntaxError; names restored from
    upstream `utils_rag.save_json`.
    """
    with open(path ,'w' ) as f:
        json.dump(content ,f ,indent=indent ,**json_dump_kwargs )
def lowercase__( __UpperCamelCase: List[str] ):
"""simple docstring"""
with open(__UpperCamelCase ) as f:
return json.load(__UpperCamelCase )
def lowercase__( ):
    """Collect metadata about the enclosing git checkout.

    FIX(review): the original passed the undefined `__UpperCamelCase` to
    `search_parent_directories` although this function takes no arguments;
    the intended value (upstream `utils_rag.get_git_info`) is True, so the
    repo is found from any working subdirectory.

    Returns a dict with repo id, HEAD sha, active branch, and hostname.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lowercase__( fn ,x ):
    """Eagerly map `fn` over `x` and return a list (list-map).

    FIX(review): the obfuscated original declared both parameters as
    `__UpperCamelCase` — a SyntaxError; distinct names restored.
    """
    return list(map(fn ,x ) )
def lowercase__( obj ,path ):
    """Pickle `obj` to the file at `path` (binary mode).

    FIX(review): the obfuscated original declared both parameters as
    `__UpperCamelCase` — a SyntaxError; distinct names restored.
    """
    with open(path ,'wb' ) as f:
        return pickle.dump(obj ,f )
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
def remove_articles(__UpperCamelCase: str ):
return re.sub(r'\b(a|an|the)\b' ,' ' ,__UpperCamelCase )
def white_space_fix(__UpperCamelCase: Tuple ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase: int ):
SCREAMING_SNAKE_CASE : str = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase: int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def lowercase__( a_pred ,a_gold ):
    """Token-level F1 between a prediction and a gold answer, both normalized
    via `normalize_answer` (SQuAD metric).

    FIX(review): the obfuscated original declared both parameters as
    `__UpperCamelCase` — a SyntaxError; distinct names restored. NOTE(review):
    `normalize_answer` was itself renamed to `lowercase__` elsewhere in this
    file and must be restored at file level.
    """
    pred_toks = normalize_answer(a_pred ).split()
    gold_toks = normalize_answer(a_gold ).split()
    common = Counter(pred_toks ) & Counter(gold_toks )
    num_same = sum(common.values() )
    if num_same == 0:
        # no overlap (also covers empty token lists, avoiding division by 0)
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def lowercase__( prediction ,ground_truth ):
    """True iff prediction and ground truth match after normalization.

    FIX(review): the obfuscated original declared both parameters as
    `__UpperCamelCase` — a SyntaxError; distinct names restored.
    NOTE(review): `normalize_answer` must be restored at file level.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def lowercase__( output_lns ,reference_lns ):
    """Average exact-match over aligned prediction/reference line lists.

    FIX(review): the obfuscated original declared both parameters as
    `__UpperCamelCase` — a SyntaxError; distinct names restored.
    NOTE(review): `exact_match_score` must be restored at file level.
    Returns {"em": fraction in [0, 1]} (0 for empty input).
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns ,reference_lns ):
        em += exact_match_score(hypo ,pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return model_prefix.startswith('rag' )
def lowercase__( __UpperCamelCase: Dict ,__UpperCamelCase: Dict ,__UpperCamelCase: Union[str, Any] ):
    """Move extra hparams onto the model config, mapping equivalent names.

    NOTE(review): garbled — the three parameters are all named
    `__UpperCamelCase` (duplicate parameters are a SyntaxError); originally
    (extra_params, hparams, config). The line below assigning the bare
    string 'dropout_rate' originally set
    `equivalent_param["dropout"] = "dropout_rate"`, and the chosen name /
    getattr/setattr targets were lost to the same rename. Restore from
    upstream `utils_rag.set_extra_model_params` before use.
    """
    SCREAMING_SNAKE_CASE : int = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    SCREAMING_SNAKE_CASE : List[str] = 'dropout_rate'
    for p in extra_params:
        if getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
            if not hasattr(__UpperCamelCase ,__UpperCamelCase ) and not hasattr(__UpperCamelCase ,equivalent_param[p] ):
                # config supports neither the param nor its equivalent: drop it
                logger.info('config doesn\'t have a `{}` attribute'.format(__UpperCamelCase ) )
                delattr(__UpperCamelCase ,__UpperCamelCase )
                continue
            SCREAMING_SNAKE_CASE : Optional[Any] = p if hasattr(__UpperCamelCase ,__UpperCamelCase ) else equivalent_param[p]
            setattr(__UpperCamelCase ,__UpperCamelCase ,getattr(__UpperCamelCase ,__UpperCamelCase ) )
            delattr(__UpperCamelCase ,__UpperCamelCase )
    return hparams, config
| 28
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( SCREAMING_SNAKE_CASE ):
    """Decoding modes for MGP-STR: character, BPE, or wordpiece.

    NOTE(review): all three members were renamed to `A` by obfuscation, so
    each assignment shadows the previous one; originally CHARACTER / BPE /
    WORDPIECE. The base (an ExplicitEnum) was also renamed away.
    """
    A : Dict = '''char'''
    A : Any = '''bpe'''
    A : Dict = '''wp'''
# Tuple of all supported decode types (originally `SUPPORTED_ANNOTATION_FORMATS`).
# NOTE(review): references DecodeType.CHARACTER/BPE/WORDPIECE, but the enum
# members above were renamed to `A` — restore the member names.
UpperCamelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( SCREAMING_SNAKE_CASE ):
    """MGP-STR processor: wraps a ViT image processor plus char/BPE/wordpiece
    tokenizers and fuses their decodings.

    NOTE(review): heavily machine-garbled — duplicate `A` parameters are a
    SyntaxError, class attributes collide on `A`, and locals were renamed to
    `SCREAMING_SNAKE_CASE` while bodies still read the original names
    (`kwargs`, `inputs`, `encodings`, `preds_index`, ...). Restore from
    upstream `transformers.MgpstrProcessor`; code kept byte-identical below.
    """
    A : List[Any] = ['''image_processor''', '''char_tokenizer''']
    A : int = '''ViTImageProcessor'''
    A : List[str] = '''MgpstrTokenizer'''
    def __init__( self, A=None, A=None, **A ):
        """Build from an image processor and a char tokenizer; also loads gpt2
        (BPE) and bert-base-uncased (wordpiece) tokenizers for fusion."""
        SCREAMING_SNAKE_CASE : Optional[int] = None
        if "feature_extractor" in kwargs:
            # legacy alias for image_processor, deprecated upstream
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', A, )
            SCREAMING_SNAKE_CASE : str = kwargs.pop('feature_extractor' )
        SCREAMING_SNAKE_CASE : Optional[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer
        SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('gpt2' )
        SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(A, A )
    def __call__( self, A=None, A=None, A=None, **A ):
        """Process images and/or text; when both are given, the text token ids
        are attached to the image inputs (originally as `labels`)."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(A, return_tensors=A, **A )
        if text is not None:
            SCREAMING_SNAKE_CASE : int = self.char_tokenizer(A, return_tensors=A, **A )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # NOTE(review): originally `inputs["labels"] = encodings["input_ids"]`
            SCREAMING_SNAKE_CASE : Any = encodings['input_ids']
            return inputs
    def UpperCamelCase_ ( self, A ):
        """Batch decode (char, bpe, wp) logits and keep, per sample, the
        decoding with the highest confidence (originally `batch_decode`)."""
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = sequences
        SCREAMING_SNAKE_CASE : List[str] = char_preds.size(0 )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self._decode_helper(A, 'char' )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._decode_helper(A, 'bpe' )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self._decode_helper(A, 'wp' )
        SCREAMING_SNAKE_CASE : Optional[Any] = []
        SCREAMING_SNAKE_CASE : Tuple = []
        for i in range(A ):
            SCREAMING_SNAKE_CASE : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
            SCREAMING_SNAKE_CASE : Dict = [char_strs[i], bpe_strs[i], wp_strs[i]]
            SCREAMING_SNAKE_CASE : List[str] = scores.index(max(A ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        SCREAMING_SNAKE_CASE : List[Any] = {}
        SCREAMING_SNAKE_CASE : int = final_strs
        SCREAMING_SNAKE_CASE : Any = final_scores
        SCREAMING_SNAKE_CASE : Dict = char_strs
        SCREAMING_SNAKE_CASE : Any = bpe_strs
        SCREAMING_SNAKE_CASE : Union[str, Any] = wp_strs
        return out
    def UpperCamelCase_ ( self, A, A ):
        """Decode one head's logits: greedy top-1, cut at the head-specific
        EOS token, confidence = product of per-step max probabilities
        (originally `_decode_helper`)."""
        if format == DecodeType.CHARACTER:
            SCREAMING_SNAKE_CASE : List[Any] = self.char_decode
            SCREAMING_SNAKE_CASE : Optional[int] = 1
            SCREAMING_SNAKE_CASE : str = '[s]'
        elif format == DecodeType.BPE:
            SCREAMING_SNAKE_CASE : str = self.bpe_decode
            SCREAMING_SNAKE_CASE : str = 2
            SCREAMING_SNAKE_CASE : List[str] = '#'
        elif format == DecodeType.WORDPIECE:
            SCREAMING_SNAKE_CASE : Any = self.wp_decode
            SCREAMING_SNAKE_CASE : Tuple = 102
            SCREAMING_SNAKE_CASE : List[Any] = '[SEP]'
        else:
            raise ValueError(F"Format {format} is not supported." )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = [], []
        SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 )
        SCREAMING_SNAKE_CASE : Any = pred_logits.size(1 )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = pred_logits.topk(1, dim=-1, largest=A, sorted=A )
        SCREAMING_SNAKE_CASE : Optional[int] = preds_index.view(-1, A )[:, 1:]
        SCREAMING_SNAKE_CASE : List[Any] = decoder(A )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.softmax(A, dim=2 ).max(dim=2 )
        SCREAMING_SNAKE_CASE : Dict = preds_max_prob[:, 1:]
        for index in range(A ):
            SCREAMING_SNAKE_CASE : Optional[int] = preds_str[index].find(A )
            SCREAMING_SNAKE_CASE : List[Any] = preds_str[index][:pred_eos]
            SCREAMING_SNAKE_CASE : Dict = preds_index[index].cpu().tolist()
            SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(A ) if eos_token in pred_index else -1
            SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1]
            SCREAMING_SNAKE_CASE : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(A )
            conf_scores.append(A )
        return dec_strs, conf_scores
    def UpperCamelCase_ ( self, A ):
        """Character-level decode; spaces are artifacts of per-char tokens and
        are stripped (originally `char_decode`)."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = [seq.replace(' ', '' ) for seq in self.char_tokenizer.batch_decode(A )]
        return decode_strs
    def UpperCamelCase_ ( self, A ):
        """BPE decode via the gpt2 tokenizer (originally `bpe_decode`)."""
        return self.bpe_tokenizer.batch_decode(A )
    def UpperCamelCase_ ( self, A ):
        """Wordpiece decode via bert tokenizer, spaces stripped (originally
        `wp_decode`)."""
        SCREAMING_SNAKE_CASE : List[str] = [seq.replace(' ', '' ) for seq in self.wp_tokenizer.batch_decode(A )]
        return decode_strs
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Lazy-import scaffold for the speech-encoder-decoder subpackage: the import
# structure dict is extended only when torch / flax are installed, and at
# type-check time the symbols are imported eagerly.
# NOTE(review): obfuscation renamed the import-structure dict AND the two
# conditional model lists all to `__magic_name__`, so each assignment clobbers
# the previous one and `_import_structure` (passed to _LazyModule below) is
# undefined. Originally: `_import_structure = {...}` and
# `_import_structure["modeling_..."] = [...]` updates.
__magic_name__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch present: expose the PyTorch model
    __magic_name__ = ['SpeechEncoderDecoderModel']
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # flax present: expose the Flax model
    __magic_name__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
    import sys
    # Replace this module with a lazy proxy that imports on attribute access.
    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): obfuscation gave the logger and the pretrained-config map the
# same name, so the second assignment clobbers the first (originally `logger`
# and `CVT_PRETRAINED_CONFIG_ARCHIVE_MAP`).
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
    """Configuration class for the CvT (Convolutional vision Transformer)
    model. Per-stage hyperparameters are 3-element lists (one per stage).

    FIX(review): the obfuscated original declared all 21 `__init__`
    parameters as `a_` (duplicate parameters are a SyntaxError) and dropped
    every `self.<attr> =` target; both are reconstructed from the assignment
    right-hand sides, which preserved the upstream attribute names.
    NOTE(review): the class attribute below was originally `model_type`;
    restore that name at file level. List defaults are kept as in upstream
    CvtConfig (shared-mutable-default caveat applies — do not mutate them).
    """
    __UpperCAmelCase : List[str] = '''cvt'''

    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 73
| 0
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# Module logger and the shared docstring injected into every processor's
# __call__ via @add_start_docstrings. NOTE(review): obfuscation renamed both
# (originally `logger` and `LOGITS_PROCESSOR_INPUTS_DOCSTRING`); the decorator
# uses below reference `_snake_case`, which must point back at this docstring.
__lowerCAmelCase : Optional[Any] = get_logger(__name__)
__lowerCAmelCase : Any = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class __lowerCAmelCase :
    """Abstract base class for all logits processors applied during Flax
    generation; subclasses must implement __call__.

    NOTE(review): obfuscation artifacts — the decorator argument
    `_snake_case` should be the inputs docstring constant above, and the two
    __call__ parameters share the name `_snake_case` (duplicate parameters
    are a SyntaxError; originally input_ids, scores).
    """
    @add_start_docstrings(_snake_case )
    def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class __lowerCAmelCase :
    """Abstract base class for logits *warpers* used with multinomial
    sampling; subclasses must implement __call__.

    NOTE(review): same obfuscation artifacts as the processor base above
    (duplicate `_snake_case` parameters; decorator arg should be the
    docstring constant).
    """
    @add_start_docstrings(_snake_case )
    def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
        raise NotImplementedError(
            F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """A list of logits processors applied in order; processors whose
    __call__ takes extra keyword args receive them from **kwargs.

    NOTE(review): obfuscation — the four __call__ parameters are all
    `_snake_case` (duplicate parameters are a SyntaxError; originally
    input_ids, scores, cur_len), the body still reads the original names
    (`kwargs`, `function_args`, `scores`), and the base (originally `list`)
    was renamed away.
    """
    @add_start_docstrings(_snake_case )
    def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Tuple ):
        for processor in self:
            __lowercase : Optional[int] = inspect.signature(processor.__call__ ).parameters
            if len(_snake_case ) > 3:
                # processor declares extra params beyond (input_ids, scores,
                # cur_len): all of them must be supplied via kwargs
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F'Make sure that all the required parameters: {list(function_args.keys() )} for '
                        F'{processor.__class__} are passed to the logits processor.' )
                __lowercase : Tuple = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
            else:
                __lowercase : Dict = processor(_snake_case , _snake_case , _snake_case )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Logits warper for temperature sampling: divides the scores by a
    strictly positive float temperature (values < 1 sharpen, > 1 flatten the
    distribution)."""

    def __init__( self , temperature : float ):
        # FIX(review): the obfuscated original checked
        # `isinstance(temperature, temperature)` — an object is not a class,
        # so the check was broken; the intended type is float.
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' )
        self.temperature = temperature

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        # FIX(review): the obfuscated original computed the scaled scores into
        # a throwaway local and returned the *input* unchanged, making the
        # warper a no-op; return the scaled tensor.
        scores = scores / self.temperature
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Top-p (nucleus) logits warper: keeps the smallest token set whose
    cumulative probability exceeds top_p, masking the rest to filter_value.

    NOTE(review): obfuscation — duplicate `_snake_case` parameters
    (SyntaxError), `isinstance(x, x)` type checks, and all locals renamed to
    `__lowercase` while later lines read the original names
    (`top_p`, `cumulative_probs`, `score_mask`, ...). Restore names from
    upstream `FlaxTopPLogitsWarper` before use; code kept byte-identical.
    """
    def __init__( self : Tuple , _snake_case : float , _snake_case : float = -float('''Inf''' ) , _snake_case : int = 1 ):
        if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
        if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
            raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
        __lowercase : List[str] = top_p
        __lowercase : Tuple = filter_value
        __lowercase : List[str] = min_tokens_to_keep
    def __call__( self : Tuple , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        # sort tokens by score, accumulate probabilities, build a keep-mask
        __lowercase , __lowercase : Optional[Any] = lax.top_k(_snake_case , scores.shape[-1] )
        __lowercase : List[Any] = jnp.full_like(_snake_case , self.filter_value )
        __lowercase : List[Any] = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
        __lowercase : Tuple = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        __lowercase : List[str] = jnp.roll(_snake_case , 1 )
        score_mask |= score_mask.at[:, 0].set(_snake_case )
        # min tokens to keep
        __lowercase : int = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
        __lowercase : List[Any] = jnp.where(_snake_case , _snake_case , _snake_case )
        __lowercase : Optional[int] = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
        return next_scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Top-k logits warper: keeps the k highest-scoring tokens per row and
    masks all others to filter_value.

    NOTE(review): same obfuscation damage as the warpers above (duplicate
    `_snake_case` params, `isinstance(x, x)`, colliding `__lowercase`
    locals whose original names the later lines still read). Restore from
    upstream `FlaxTopKLogitsWarper`; code kept byte-identical.
    """
    def __init__( self : Optional[Any] , _snake_case : int , _snake_case : float = -float('''Inf''' ) , _snake_case : int = 1 ):
        if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
            raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' )
        __lowercase : int = max(_snake_case , _snake_case )
        __lowercase : str = filter_value
    def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        __lowercase , __lowercase : List[str] = scores.shape
        # start from a fully-masked flat score buffer and scatter the top-k
        # values of each row back in at their flattened positions
        __lowercase : List[Any] = jnp.full(batch_size * vocab_size , self.filter_value )
        __lowercase : Optional[int] = min(self.top_k , scores.shape[-1] )  # Safety check
        __lowercase , __lowercase : List[Any] = lax.top_k(_snake_case , _snake_case )
        __lowercase : Optional[Any] = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        __lowercase : str = topk_scores.flatten()
        __lowercase : Optional[int] = topk_indices.flatten() + shift
        __lowercase : Tuple = next_scores_flat.at[topk_indices_flat].set(_snake_case )
        __lowercase : str = next_scores_flat.reshape(_snake_case , _snake_case )
        return next_scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Forces the BOS token to be generated at the first position by masking
    every other token to -inf when cur_len == 1.

    NOTE(review): obfuscation — duplicate `_snake_case` __call__ params
    (SyntaxError) and `__lowercase` local collisions; the bodies still read
    the original names (`bos_token_id`, `new_scores`, `cur_len`).
    """
    def __init__( self : Optional[Any] , _snake_case : int ):
        __lowercase : List[str] = bos_token_id
    def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        __lowercase : List[str] = jnp.full(scores.shape , -float('''inf''' ) )
        # boolean flag that is 1 exactly when cur_len == 1 (first step)
        __lowercase : str = 1 - jnp.bool_(cur_len - 1 )
        __lowercase : Union[str, Any] = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Forces the EOS token when the sequence reaches max_length - 1, by
    masking every other token to -inf at that step.

    NOTE(review): same obfuscation damage as the forced-BOS processor above.
    """
    def __init__( self : Any , _snake_case : int , _snake_case : int ):
        __lowercase : int = max_length
        __lowercase : Optional[int] = eos_token_id
    def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        __lowercase : str = jnp.full(scores.shape , -float('''inf''' ) )
        # flag is 1 exactly when cur_len == max_length - 1 (last step)
        __lowercase : int = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        __lowercase : Any = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Enforces a minimum generation length by masking the EOS token to -inf
    while cur_len < min_length.

    NOTE(review): obfuscation — duplicate `_snake_case` parameters
    (SyntaxError), `isinstance(x, x)` checks, and `__lowercase` local
    collisions; the bodies still read the original names.
    """
    def __init__( self : str , _snake_case : int , _snake_case : int ):
        if not isinstance(_snake_case , _snake_case ) or min_length < 0:
            raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
        if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
            raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
        __lowercase : Tuple = min_length
        __lowercase : str = eos_token_id
    def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        # create boolean flag to decide if min length penalty should be applied
        __lowercase : Tuple = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        __lowercase : Any = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , _snake_case )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Suppresses a fixed token set (scores -> -inf) exactly at generation
    step begin_index.

    NOTE(review): obfuscation — duplicate `_snake_case` parameters
    (SyntaxError) and `__lowercase` local collisions; bodies still read the
    original names (`begin_suppress_tokens`, `cur_len`).
    """
    def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Any ):
        __lowercase : int = list(_snake_case )
        __lowercase : int = begin_index
    def __call__( self : str , _snake_case : Dict , _snake_case : Tuple , _snake_case : int ):
        # flag is 1 exactly when cur_len == begin_index
        __lowercase : str = 1 - jnp.bool_(cur_len - self.begin_index )
        __lowercase : Union[str, Any] = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , _snake_case )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Unconditionally suppresses a fixed token set at every generation step
    by setting their scores to -inf.

    NOTE(review): obfuscation — duplicate `_snake_case` __call__ params
    (SyntaxError); locals renamed while the body reads `suppress_tokens`.
    """
    def __init__( self : List[Any] , _snake_case : list ):
        __lowercase : List[Any] = list(_snake_case )
    def __call__( self : List[str] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        __lowercase : Optional[Any] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Forces specific tokens at specific generation indices, as given by a
    {index: token_id} map, in an XLA-compatible way (the map is densified
    into an int array with -1 for unforced indices).

    NOTE(review): obfuscation — duplicate `_snake_case` parameters
    (SyntaxError) and `__lowercase` local collisions; bodies still read the
    original names (`force_token_map`, `force_token_array`, ...).
    """
    def __init__( self : Optional[Any] , _snake_case : List[str] ):
        __lowercase : int = dict(_snake_case )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        __lowercase : Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                __lowercase : List[Any] = force_token_array.at[index].set(_snake_case )
        __lowercase : Tuple = jnp.intaa(_snake_case )
    def __call__( self : Dict , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
        def _force_token(_snake_case : List[str] ):
            # -inf everywhere except score 0 at the single forced token
            __lowercase : str = scores.shape[0]
            __lowercase : str = self.force_token_array[generation_idx]
            __lowercase : Tuple = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('''inf''' )
            __lowercase : Optional[int] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            __lowercase : Tuple = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
            return new_scores
        # only force when cur_len is inside the array and the entry is >= 0
        __lowercase : int = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
        return scores
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Whisper timestamp rules: suppresses <|notimestamps|>, forces timestamp
    tokens to appear in valid pairs, caps the initial timestamp index, and
    samples a timestamp when the total timestamp probability exceeds the best
    text token.

    NOTE(review): obfuscation — parameters renamed to `_snake_case`
    (duplicated in __call__, a SyntaxError) and locals collapsed onto
    `__lowercase` while the bodies still read the original names
    (`generate_config`, `last_was_timestamp`, `logprobs`, ...). Logic is too
    order-sensitive to reconstruct safely here; restore from upstream
    `FlaxWhisperTimeStampLogitsProcessor`. Code kept byte-identical.
    """
    def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : int ):
        __lowercase : List[Any] = generate_config.eos_token_id
        __lowercase : int = generate_config.no_timestamps_token_id
        __lowercase : int = generate_config.no_timestamps_token_id + 1
        __lowercase : List[Any] = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(_snake_case , '''max_initial_timestamp_index''' ):
            __lowercase : Optional[Any] = generate_config.max_initial_timestamp_index
        else:
            __lowercase : Dict = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            __lowercase : str = model_config.vocab_size
    def __call__( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : str ):
        # suppress <|notimestamps|> which is handled by without_timestamps
        __lowercase : str = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
        def handle_pairs(_snake_case : Any , _snake_case : Optional[Any] ):
            # pair rule: after one timestamp force a non-timestamp (or after
            # two, force no further timestamps / no early EOS)
            __lowercase : Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
            __lowercase : int = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
            __lowercase : Any = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
            __lowercase : Union[str, Any] = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
            return jnp.where(
                _snake_case , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , _snake_case , )
        __lowercase : int = jax.vmap(_snake_case )(_snake_case , _snake_case )
        __lowercase : Any = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
        __lowercase : Dict = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
        __lowercase : int = self.timestamp_begin + self.max_initial_timestamp_index
        __lowercase : Optional[int] = jnp.where(
            _snake_case , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , _snake_case , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        __lowercase : Union[str, Any] = jax.nn.log_softmax(_snake_case , axis=-1 )
        def handle_cumulative_probs(_snake_case : Optional[int] , _snake_case : Union[str, Any] ):
            __lowercase : Tuple = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            __lowercase : List[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , _snake_case , )
        __lowercase : Optional[Any] = jax.vmap(_snake_case )(_snake_case , _snake_case )
        return scores
| 509
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# NOTE(review): obfuscation collapsed four distinct module constants onto
# `__lowerCAmelCase`, so each assignment shadows the previous one. Originally:
# `logger`, `WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP`, `NON_SPEECH_TOKENS`, and
# `NON_SPEECH_TOKENS_MULTI` — the two token-id lists are the non-speech
# tokens Whisper suppresses during generation (English-only vs multilingual
# vocabularies). Values preserved byte-identically below.
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : str = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__lowerCAmelCase : Dict = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
    1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
    4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
    11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
    17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
    34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__lowerCAmelCase : Any = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
    3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
    7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
    14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
    22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
    42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Configuration for a Whisper-style speech encoder-decoder model.

    NOTE(review): every ``__init__`` parameter is named ``_snake_case`` (a
    duplicate-argument SyntaxError) and every assignment targets the
    obfuscated name ``__lowercase`` while the right-hand sides still carry
    the intended attribute names (vocab_size, num_mel_bins, ...). The class
    appears machine-mangled and cannot run as written — TODO restore names.
    """
    A__ : Any = '''whisper'''  # model_type identifier
    A__ : Optional[int] = ['''past_key_values''']  # keys ignored at inference
    # generic config attribute -> Whisper-specific attribute mapping
    A__ : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[Any] , _snake_case : Optional[int]=5_1865 , _snake_case : str=80 , _snake_case : Any=6 , _snake_case : Union[str, Any]=4 , _snake_case : int=6 , _snake_case : Any=4 , _snake_case : int=1536 , _snake_case : Any=1536 , _snake_case : str=0.0 , _snake_case : Dict=0.0 , _snake_case : Dict=5_0257 , _snake_case : List[str]=True , _snake_case : Dict=True , _snake_case : List[str]="gelu" , _snake_case : Union[str, Any]=256 , _snake_case : Optional[Any]=0.0 , _snake_case : Dict=0.0 , _snake_case : Union[str, Any]=0.0 , _snake_case : str=0.02 , _snake_case : Dict=False , _snake_case : Dict=1500 , _snake_case : Optional[int]=448 , _snake_case : Optional[Any]=5_0256 , _snake_case : Tuple=5_0256 , _snake_case : Optional[int]=5_0256 , _snake_case : List[str]=None , _snake_case : Tuple=[220, 5_0256] , _snake_case : Union[str, Any]=False , _snake_case : str=256 , _snake_case : List[str]=False , _snake_case : List[Any]=0.05 , _snake_case : Dict=10 , _snake_case : Any=2 , _snake_case : Dict=0.0 , _snake_case : Dict=10 , _snake_case : Optional[int]=0 , _snake_case : Tuple=7 , **_snake_case : Union[str, Any] , ):
        # Architecture hyperparameters (intended names taken from the RHS).
        __lowercase : Optional[Any] = vocab_size
        __lowercase : List[Any] = num_mel_bins
        __lowercase : Optional[int] = d_model
        __lowercase : Tuple = encoder_layers
        __lowercase : str = encoder_attention_heads
        __lowercase : Any = decoder_layers
        __lowercase : Tuple = decoder_attention_heads
        __lowercase : List[Any] = decoder_ffn_dim
        __lowercase : Any = encoder_ffn_dim
        # Regularisation / training-time settings.
        __lowercase : List[str] = dropout
        __lowercase : str = attention_dropout
        __lowercase : Tuple = activation_dropout
        __lowercase : Dict = activation_function
        __lowercase : Optional[Any] = init_std
        __lowercase : List[str] = encoder_layerdrop
        __lowercase : List[str] = decoder_layerdrop
        __lowercase : Dict = use_cache
        __lowercase : Dict = encoder_layers
        __lowercase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
        __lowercase : str = max_source_positions
        __lowercase : Dict = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        __lowercase : str = classifier_proj_size
        __lowercase : Any = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __lowercase : str = apply_spec_augment
        __lowercase : Optional[int] = mask_time_prob
        __lowercase : Any = mask_time_length
        __lowercase : str = mask_time_min_masks
        __lowercase : Any = mask_feature_prob
        __lowercase : str = mask_feature_length
        __lowercase : int = mask_feature_min_masks
        __lowercase : Tuple = median_filter_width
        super().__init__(
            pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """ONNX export configuration for a Whisper-style seq2seq model.

    NOTE(review): assignments target the obfuscated name ``__lowercase``
    while later statements read the intended names (``common_inputs``,
    ``encoder_inputs``, ``decoder_inputs``, ``dummy_inputs``), and method
    parameters are all called ``_snake_case``; the code appears
    machine-mangled and cannot run as written — TODO restore real names.
    """
    @property
    def snake_case_ ( self : Tuple ):
        # Declares the dynamic ONNX input axes for the audio features.
        __lowercase : Union[str, Any] = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ] )
        if self.use_past:
            # With cached keys/values only one new decoder token is fed.
            __lowercase : List[str] = {0: '''batch'''}
        else:
            __lowercase : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
        return common_inputs
    def snake_case_ ( self : str , _snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional["TensorType"] = None , _snake_case : int = 2_2050 , _snake_case : float = 5.0 , _snake_case : int = 220 , ):
        # Builds dummy audio + decoder-token inputs for ONNX export tracing.
        __lowercase : List[str] = OrderedDict()
        __lowercase : Tuple = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
        __lowercase : Any = encoder_inputs['''input_features'''].shape[2]
        # Encoder downsamples by 2; presumably sizes the decoder sequence.
        __lowercase : Dict = encoder_sequence_length // 2 if self.use_past else seq_length
        __lowercase : List[Any] = super().generate_dummy_inputs(
            preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
        __lowercase : List[Any] = encoder_inputs.pop('''input_features''' )
        __lowercase : Any = decoder_inputs.pop('''decoder_input_ids''' )
        if "past_key_values" in decoder_inputs:
            # Cached attention states ride along when use_past is enabled.
            __lowercase : Dict = decoder_inputs.pop('''past_key_values''' )
        return dummy_inputs
    @property
    def snake_case_ ( self : Tuple ):
        # Absolute tolerance used when validating exported-model outputs.
        return 1E-3
| 509
| 1
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__snake_case : int ='\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
__snake_case : List[Any] ='\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
__snake_case : str ='\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
__snake_case : Tuple ='\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
__snake_case : Dict ='The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    """HumanEval ``code_eval`` metric: executes generated code candidates
    against their unit tests in a sandboxed subprocess and reports pass@k.

    NOTE(review): locals are assigned to ``lowerCAmelCase__`` while later
    statements read ``futures`` / ``results`` / ``total`` / ``correct`` /
    ``ks`` — the class appears machine-mangled and cannot run as written;
    TODO restore real names.
    """
    def lowerCAmelCase__ (self ) -> Optional[int]:
        """Declare the metric's features, citation and license metadata."""
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Value('''string''' ),
                } ) ,homepage='''https://github.com/openai/human-eval''' ,codebase_urls=['''https://github.com/openai/human-eval'''] ,reference_urls=['''https://github.com/openai/human-eval'''] ,license=_LICENSE ,)
    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase=[1, 10, 1_00] ,__lowerCamelCase=4 ,__lowerCamelCase=3.0 ) -> Optional[int]:
        """Run each candidate program against its test case in a thread pool
        and compute pass@k. Executes untrusted code, so it refuses to run
        unless the HF_ALLOW_CODE_EVAL=1 opt-in is set."""
        if os.getenv('''HF_ALLOW_CODE_EVAL''' ,0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('''This metric is currently not supported on Windows.''' )
        with ThreadPoolExecutor(max_workers=__lowerCamelCase ) as executor:
            lowerCAmelCase__ : Optional[Any] = []
            lowerCAmelCase__ : Any = Counter()
            lowerCAmelCase__ : List[str] = 0
            lowerCAmelCase__ : str = defaultdict(__lowerCamelCase )
            # Submit one sandboxed execution per (task, candidate) pair.
            for task_id, (candidates, test_case) in enumerate(zip(__lowerCamelCase ,__lowerCamelCase ) ):
                for candidate in candidates:
                    lowerCAmelCase__ : Union[str, Any] = candidate + '''\n''' + test_case
                    lowerCAmelCase__ : List[str] = (test_program, timeout, task_id, completion_id[task_id])
                    lowerCAmelCase__ : List[Any] = executor.submit(__lowerCamelCase ,*__lowerCamelCase )
                    futures.append(__lowerCamelCase )
                    completion_id[task_id] += 1
                    n_samples += 1
            # Collect results as they finish, grouped per task id.
            for future in as_completed(__lowerCamelCase ):
                lowerCAmelCase__ : int = future.result()
                results[result["task_id"]].append((result['''completion_id'''], result) )
        # Tally per-task totals and pass counts, then estimate pass@k.
        lowerCAmelCase__ : List[str] = [], []
        for result in results.values():
            result.sort()
            lowerCAmelCase__ : List[str] = [r[1]['''passed'''] for r in result]
            total.append(len(__lowerCamelCase ) )
            correct.append(sum(__lowerCamelCase ) )
        lowerCAmelCase__ : List[str] = np.array(__lowerCamelCase )
        lowerCAmelCase__ : int = np.array(__lowerCamelCase )
        lowerCAmelCase__ : List[Any] = k
        lowerCAmelCase__ : List[str] = {f"""pass@{k}""": estimate_pass_at_k(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def lowerCAmelCase__ ( num_samples ,num_correct ,k ):
    '''Return the unbiased pass@k estimate for each task as a numpy array.

    Args:
        num_samples: int (broadcast to all tasks) or a per-task sequence of
            the number of generated candidates n.
        num_correct: per-task sequence of the number of passing candidates c.
        k: the k in pass@k.

    Fixes the original signature, which reused ``lowerCamelCase_`` for all
    three parameters (a duplicate-argument SyntaxError) and made the
    ``isinstance`` check compare a name against itself.
    '''
    def estimator(n: int ,c: int ,k: int) -> float:
        # Unbiased estimator 1 - C(n-c, k)/C(n, k), computed as a stable
        # product to avoid huge binomial coefficients.
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1))

    if isinstance(num_samples ,int):
        # A single sample count applies to every task.
        num_samples_it = itertools.repeat(num_samples ,len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n) ,int(c) ,k) for n, c in zip(num_samples_it ,num_correct)])
| 721
|
from collections.abc import Callable
import numpy as np
def lowerCAmelCase__ ( ode_func: Callable ,y0: float ,x0: float ,step_size: float ,x_end: float):
    '''Solve y' = ode_func(x, y) with Heun's (modified Euler) method.

    Args:
        ode_func: right-hand side f(x, y).
        y0: initial value y(x0).
        x0: start of the integration interval.
        step_size: fixed step h (> 0).
        x_end: end of the interval; ceil((x_end - x0) / h) steps are taken.

    Returns:
        numpy array of y values at x0, x0 + h, ...
        Example: for f(x, y) = 2x with y0=0, x0=0, h=0.5, x_end=1 the
        result is exactly [0.0, 0.25, 1.0].

    Fixes the original signature, which reused ``lowerCamelCase_`` for all
    five parameters (a duplicate-argument SyntaxError) so the body read
    undefined names.
    '''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain Euler step; corrector: trapezoidal average of
        # the slope at both ends of the step.
        y_predict = y[k] + step_size * ode_func(x ,y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x ,y[k]) + ode_func(x + step_size ,y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 90
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def lowerCamelCase ( a_ , a_ ) -> str:
    """End-to-end unit test for file_transfer.send_file using patched mocks.

    NOTE(review): both parameters are named ``a_`` (a duplicate-argument
    SyntaxError) and the setup assigns to ``lowerCAmelCase_`` while the
    assertions read ``conn`` / ``sock`` / ``file`` — machine-mangled; the
    patched mocks were presumably bound as (file, sock) — TODO restore.
    """
    # ===== initialization =====
    lowerCAmelCase_ = Mock()
    lowerCAmelCase_ = conn, Mock()
    lowerCAmelCase_ = iter([1, None] )
    lowerCAmelCase_ = lambda a_ : next(a_ )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=a_ )
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 318
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger()
@dataclass
class a_ :
    """Forward-hook tracer recording the leaf modules a model executes.

    NOTE(review): in the hook method all three parameters share the name
    ``lowercase_`` (a duplicate-argument SyntaxError) and the leaf-check
    result is assigned to ``lowerCAmelCase_`` while the next line reads
    ``has_not_submodules`` — machine-mangled; TODO restore real names.
    """
    __a: nn.Module  # module whose forward passes are traced
    __a: List[nn.Module] = field(default_factory=a_ )  # registered hook handles
    __a: list = field(default_factory=a_ )  # executed leaf modules, in order
    def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
        # Record only leaf modules (no submodules) plus conv / batch-norm.
        lowerCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(lowercase_ , nn.Convad ) or isinstance(lowercase_ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowercase_ )
    def __call__( self , lowercase_ ) -> List[str]:
        # Register the hook on every submodule, run one forward pass, then
        # remove all hooks again.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowercase_ )
        [x.remove() for x in self.handles]
        return self
    @property
    def _lowercase ( self ) -> str:
        # Traced modules that actually own parameters/buffers.
        return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a_ :
    """Copies weights from a source module into a destination module by
    pairing their traced (parameterised) operations one-to-one.

    NOTE(review): the traced lists are assigned to ``lowerCAmelCase_`` while
    the following lines read ``dest_traced`` / ``src_traced``, and every
    lambda parameter shadows the filtered list — machine-mangled; cannot
    run as written. TODO restore real names.
    """
    __a: nn.Module  # source module (weights copied from)
    __a: nn.Module  # destination module (weights copied into)
    __a: int = 0  # verbosity flag: 1 prints each transfer
    __a: List = field(default_factory=a_ )  # op types to skip on the src side
    __a: List = field(default_factory=a_ )  # op types to skip on the dest side
    def __call__( self , lowercase_ ) -> Union[str, Any]:
        # Trace both modules on the same input, filter skipped op types,
        # then copy state dicts position-by-position.
        lowerCAmelCase_ = Tracker(self.dest )(lowercase_ ).parametrized
        lowerCAmelCase_ = Tracker(self.src )(lowercase_ ).parametrized
        lowerCAmelCase_ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.src_skip , lowercase_ ) )
        lowerCAmelCase_ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.dest_skip , lowercase_ ) )
        if len(lowercase_ ) != len(lowercase_ ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(lowercase_ )} operations while'''
                f''' destination module has {len(lowercase_ )}.''' )
        for dest_m, src_m in zip(lowercase_ , lowercase_ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def lowerCamelCase ( a_ , a_ , a_ , a_ = True ) -> Optional[Any]:
    """Convert one timm ResNet checkpoint to HF format; optionally push it.

    NOTE(review): all four parameters are named ``a_`` (a duplicate-argument
    SyntaxError) and locals are assigned to ``lowerCAmelCase_`` while later
    lines read ``from_model`` / ``our_model`` / ``checkpoint_name`` /
    ``save_directory`` — machine-mangled; the intended signature was
    presumably (name, config, save_directory, push_to_hub=True).
    """
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        # Instantiate the pretrained timm model and an empty HF model, then
        # transfer weights module-by-module on a dummy forward pass.
        lowerCAmelCase_ = timm.create_model(a_ , pretrained=a_ ).eval()
        lowerCAmelCase_ = ResNetForImageClassification(a_ ).eval()
        lowerCAmelCase_ = ModuleTransfer(src=a_ , dest=a_ )
        lowerCAmelCase_ = torch.randn((1, 3, 224, 224) )
        module_transfer(a_ )
    # Sanity check: converted model must reproduce the original logits.
    assert torch.allclose(from_model(a_ ) , our_model(a_ ).logits ), "The model logits don't match the original one."
    lowerCAmelCase_ = F'''resnet{"-".join(name.split("resnet" ) )}'''
    print(a_ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=a_ , )
        # we can use the convnext one
        lowerCAmelCase_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=a_ , )
        print(F'''Pushed {checkpoint_name}''' )
def lowerCamelCase ( a_ , a_ = None , a_ = True ) -> str:
    """Build per-architecture ResNet configs and convert one or all models.

    NOTE(review): all parameters are named ``a_`` (a duplicate-argument
    SyntaxError) and locals are assigned to ``lowerCAmelCase_`` while later
    lines read ``num_labels`` / ``idalabel`` / ``names_to_config`` /
    ``expected_shape`` — machine-mangled; the intended signature was
    presumably (save_directory, model_name=None, push_to_hub=True).
    """
    lowerCAmelCase_ = 'imagenet-1k-id2label.json'
    lowerCAmelCase_ = 1_000
    lowerCAmelCase_ = (1, num_labels)
    lowerCAmelCase_ = 'huggingface/label-files'
    lowerCAmelCase_ = num_labels
    # id -> label mapping downloaded from the HF hub dataset repo.
    lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
    lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
    # Pre-bind the shared label metadata so each config entry stays terse.
    lowerCAmelCase_ = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_ )
    lowerCAmelCase_ = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
    }
    if model_name:
        # Convert only the requested architecture ...
        convert_weight_and_push(a_ , names_to_config[model_name] , a_ , a_ )
    else:
        # ... or every known architecture.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(a_ , a_ , a_ , a_ )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: parse arguments, create the output directory, and run
    # the timm -> HF ResNet conversion.
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    # NOTE(review): parser/args are assigned to ``lowerCamelCase_`` but read
    # back as ``parser`` / ``args`` — machine-mangled.
    lowerCamelCase_ = parser.parse_args()
    lowerCamelCase_ = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase ( lowerCAmelCase : int):
    """Return True iff ``lowerCAmelCase`` is a perfect square.

    Fixes two defects in the original: the body read the undefined name
    ``number`` (the parameter is ``lowerCAmelCase``), and ``int(x ** 0.5)``
    relies on float rounding, which can misclassify large perfect squares;
    ``math.isqrt`` is exact for arbitrary-size ints.
    """
    from math import isqrt  # local import so the file's import block is untouched
    if lowerCAmelCase < 0:
        # Negative numbers are never perfect squares (and isqrt would raise).
        return False
    root : int = isqrt(lowerCAmelCase)
    return lowerCAmelCase == root * root
def lowercase ( x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int):
    """Return the reduced (numerator, denominator) of x + y + z.

    Each operand i is the fraction i_num / i_den; the three are summed over
    the common denominator and divided by their gcd.

    Fixes the original signature, which reused ``lowerCAmelCase`` for all
    six parameters (a duplicate-argument SyntaxError) while the body read
    the intended names.
    """
    top : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom : int = x_den * y_den * z_den
    hcf : int = gcd(top , bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def lowercase ( lowerCAmelCase : int = 35):
    """Sum the reduced fractions z derived from fraction pairs (x, y) with
    numerator < denominator <= order, for the four cases n in {1, 2, -1, -2},
    and return numerator + denominator of the running total.

    NOTE(review): locals are assigned to ``_A`` while later lines read
    ``unique_s`` / ``total`` / ``hcf`` / ``z_num`` / ``z_den`` and the
    parameter is read as ``order`` — machine-mangled; cannot run as written.
    """
    _A : set = set()
    _A : int
    _A : Fraction = Fraction(0)
    _A : tuple[int, int]
    for x_num in range(1 , order + 1):
        for x_den in range(x_num + 1 , order + 1):
            for y_num in range(1 , order + 1):
                for y_den in range(y_num + 1 , order + 1):
                    # n=1
                    _A : Any = x_num * y_den + x_den * y_num
                    _A : str = x_den * y_den
                    _A : Union[str, Any] = gcd(lowerCAmelCase , lowerCAmelCase)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        _A : Tuple = add_three(
                            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
                        unique_s.add(lowerCAmelCase)
                    # n=2
                    _A : Tuple = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    _A : Optional[int] = x_den * x_den * y_den * y_den
                    if is_sq(lowerCAmelCase) and is_sq(lowerCAmelCase):
                        _A : str = int(sqrt(lowerCAmelCase))
                        _A : Optional[Any] = int(sqrt(lowerCAmelCase))
                        _A : List[Any] = gcd(lowerCAmelCase , lowerCAmelCase)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            _A : Union[str, Any] = add_three(
                                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
                            unique_s.add(lowerCAmelCase)
                    # n=-1
                    _A : Dict = x_num * y_num
                    _A : Union[str, Any] = x_den * y_num + x_num * y_den
                    _A : Tuple = gcd(lowerCAmelCase , lowerCAmelCase)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        _A : List[Any] = add_three(
                            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
                        unique_s.add(lowerCAmelCase)
                    # n=2
                    _A : int = x_num * x_num * y_num * y_num
                    _A : Dict = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(lowerCAmelCase) and is_sq(lowerCAmelCase):
                        _A : Dict = int(sqrt(lowerCAmelCase))
                        _A : Tuple = int(sqrt(lowerCAmelCase))
                        _A : str = gcd(lowerCAmelCase , lowerCAmelCase)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            _A : str = add_three(
                                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
                            unique_s.add(lowerCAmelCase)
    # Sum every distinct reduced fraction exactly via Fraction arithmetic.
    for num, den in unique_s:
        total += Fraction(lowerCAmelCase , lowerCAmelCase)
    return total.denominator + total.numerator
if __name__ == "__main__":
    # NOTE(review): calls ``solution`` but the function above is named
    # ``lowercase`` — machine-mangled.
    print(f'{solution() = }')
| 417
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__UpperCamelCase : Dict = '''Create a default config file for Accelerate with only a few flags set.'''
def lowercase ( lowerCAmelCase : List[Any]="no" , lowerCAmelCase : str = default_json_config_file , lowerCAmelCase : bool = False):
    """Write a minimal Accelerate cluster config (mixed precision + devices).

    NOTE(review): the three parameters share the name ``lowerCAmelCase`` (a
    duplicate-argument SyntaxError) and locals are assigned to ``_A`` while
    later lines read ``path`` / ``mixed_precision`` / ``num_gpus`` etc. —
    machine-mangled; intended signature was presumably
    (mixed_precision="no", save_location=default_json_config_file,
    use_xpu=False). Returns the config path, or False if one already exists.
    """
    _A : Optional[int] = Path(lowerCAmelCase)
    path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase)
    if path.exists():
        # Never clobber an existing config; the user must edit it manually.
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""")
        return False
    _A : Dict = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""")
    _A : str = {
        '''compute_environment''': '''LOCAL_MACHINE''',
        '''mixed_precision''': mixed_precision,
    }
    # Pick the distributed type from whichever accelerator backend is
    # present: CUDA, then XPU (opt-in), then NPU, else CPU-only.
    if torch.cuda.is_available():
        _A : List[Any] = torch.cuda.device_count()
        _A : str = num_gpus
        _A : str = False
        if num_gpus > 1:
            _A : List[str] = '''MULTI_GPU'''
        else:
            _A : Optional[int] = '''NO'''
    elif is_xpu_available() and use_xpu:
        _A : Union[str, Any] = torch.xpu.device_count()
        _A : Optional[Any] = num_xpus
        _A : int = False
        if num_xpus > 1:
            _A : str = '''MULTI_XPU'''
        else:
            _A : List[str] = '''NO'''
    elif is_npu_available():
        _A : List[str] = torch.npu.device_count()
        _A : List[Any] = num_npus
        _A : Tuple = False
        if num_npus > 1:
            _A : Dict = '''MULTI_NPU'''
        else:
            _A : Tuple = '''NO'''
    else:
        # CPU-only fallback.
        _A : int = 0
        _A : Dict = True
        _A : str = 1
        _A : List[Any] = '''NO'''
    _A : List[Any] = ClusterConfig(**lowerCAmelCase)
    config.to_json_file(lowerCAmelCase)
    return path
def lowercase ( lowerCAmelCase : Tuple , lowerCAmelCase : int):
    """Attach the `accelerate config default` subcommand to a parser.

    NOTE(review): both parameters are named ``lowerCAmelCase`` (a
    duplicate-argument SyntaxError) and the sub-parser is assigned to ``_A``
    but mutated via ``parser`` — machine-mangled; intended signature was
    presumably (parser, parents).
    """
    _A : List[Any] = parser.add_parser('''default''' , parents=lowerCAmelCase , help=lowerCAmelCase , formatter_class=lowerCAmelCase)
    parser.add_argument(
        '''--config_file''' , default=lowerCAmelCase , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , dest='''save_location''' , )
    parser.add_argument(
        '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=lowerCAmelCase , help='''Whether or not to use mixed precision training. '''
        '''Choose between FP16 and BF16 (bfloat16) training. '''
        '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
    parser.set_defaults(func=lowerCAmelCase)
    return parser
def lowercase ( lowerCAmelCase : Optional[Any]):
    """Handler for `accelerate config default`: write the config and report.

    NOTE(review): the result is assigned to ``_A`` but read as
    ``config_file``, and the body reads ``args`` while the parameter is
    ``lowerCAmelCase`` — machine-mangled.
    """
    _A : List[str] = write_basic_config(args.mixed_precision , args.save_location)
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""")
| 417
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
a_ = logging.get_logger(__name__)
a_ = {}
a_ = {}
a_ = {}
def _a( formatter_cls : type, format_type : Optional[str], aliases : Optional[List[str]] = None, ):
    '''Register ``formatter_cls`` under ``format_type`` and its aliases.

    Mutates the module-level ``_FORMAT_TYPES`` / ``_FORMAT_TYPES_ALIASES``
    registries, warning when an existing entry is overwritten.

    Fixes the original signature, which reused ``UpperCamelCase__`` for all
    three parameters (a duplicate-argument SyntaxError) and assigned the
    registry entries to a throwaway local instead of the registries.
    '''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _a( unavailable_error : Exception, format_type : Optional[str], aliases : Optional[List[str]] = None ):
    '''Register an error to raise when ``format_type`` (or an alias) is
    requested but its backend library is not installed.

    Mutates the module-level ``_FORMAT_TYPES_ALIASES_UNAVAILABLE`` registry.
    Fixes the original signature, which reused ``UpperCamelCase__`` for all
    three parameters (a duplicate-argument SyntaxError).
    '''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
# Optional backends: register the formatter when the library is installed,
# otherwise register an explanatory error that is raised on first use.
# NOTE(review): the error objects are assigned to ``a_`` but read back as
# ``_torch_error`` / ``_tf_error`` / ``_jax_error`` — machine-mangled.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter
    _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    a_ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter
    _register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    a_ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter
    _register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    a_ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _a( format_type : Optional[str] ):
    '''Resolve a format-type alias (e.g. "np") to its canonical name.

    Unknown names are returned unchanged. Fixes the original, whose
    parameter was named ``UpperCamelCase__`` while the body read
    ``format_type`` (a NameError at call time).
    '''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def _a( format_type : Optional[str], **format_kwargs ):
    '''Instantiate the formatter registered for ``format_type``.

    Raises the registered unavailability error for a known-but-missing
    backend, ValueError for an unknown type. Fixes the original signature,
    which reused ``UpperCamelCase__`` for both the positional and the
    **kwargs parameter (a SyntaxError).
    '''
    # NOTE(review): in this mangled file the alias resolver is itself named
    # ``_a``; the call below keeps the name used by the original source.
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 296
|
'''simple docstring'''
def _a( UpperCamelCase__ : int = 1_0, UpperCamelCase__ : int = 2_2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =range(1, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] =range(1, UpperCamelCase__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
    # NOTE(review): calls ``solution`` but the function above is named
    # ``_a`` — machine-mangled.
    print(F'''{solution(1_0, 2_2) = }''')
| 1
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowercase ( coefficient_matrix ,constant_matrix ,init_val ,iterations ,) -> list[float]:
    """Solve Ax = b iteratively with the Jacobi method.

    Args:
        coefficient_matrix: (n, n) array A.
        constant_matrix: (n, 1) array b.
        init_val: length-n initial guess.
        iterations: number of Jacobi sweeps (must be >= 1).

    Returns:
        The final iterate as a list of floats.

    Raises:
        ValueError: on dimension mismatches, non-positive ``iterations``, or
            (via the helper) a coefficient matrix that is not strictly
            diagonally dominant.

    Reconstructed: the original reused ``_a`` for all four parameters (a
    duplicate-argument SyntaxError) and assigned every local to
    ``UpperCAmelCase_`` while reading the intended names.
    """
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(msg )
    if colsb != 1:
        msg = f"Constant matrix must be nx1 but received {rowsb}x{colsb}"
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rowsa}x{colsa} and {rowsb}x{colsb}"
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val )} and {rowsa}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    # Augmented matrix [A | b]: each row holds the coefficients plus the RHS.
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) ,axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]  # diagonal entry a_ii
                elif col == cols - 1:
                    val = table[row][col]  # RHS entry b_i
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table) -> bool:
    """Check that the augmented matrix ``table`` ([A | b]) has a strictly
    diagonally dominant coefficient part.

    :param table: (n x n+1) numpy array; the last column (the constants) is
        excluded from the row sums.
    :return: True when every diagonal entry strictly exceeds the sum of the
        other coefficients in its row.
    :raises ValueError: when any row violates strict diagonal dominance.
    """
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger (used by the tokenizer class below).
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a saved vocabulary directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

# Hosted locations of the pretrained vocabulary files, per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# Marker appended to a word's last BPE symbol, and the continuation prefix
# used when re-joining sub-word tokens.
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in ``word``.

    :param word: sequence of symbols (e.g. a tuple of variable-length strings).
    :return: set of ``(previous_symbol, symbol)`` tuples.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Speech2Text2.

    Without a ``merges.txt`` file the tokenizer can only decode (ids -> text);
    encoding requires the BPE merge ranks.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            # Decode-only mode: no merge ranks, so `_tokenize` will refuse to run.
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        # Size of the id -> token table.
        return len(self.decoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single whitespace token and return the
        space-separated sub-word string (continuations prefixed with "@@ ")."""
        # Mark the end of the word so merges cannot cross word boundaries.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the highest-ranked (lowest index) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        # Mark sub-word continuations with the "@@ " prefix convention.
        word = word.replace(" ", "@@ ")
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` on whitespace and BPE-encode each token."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token) -> int:
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index) -> str:
        """Map an id back to its token string, falling back to the unk token."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        """Join sub-word tokens back into a plain string."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split("@@ "))

        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` (and ``merges.txt`` when available) into
        ``save_directory``; returns the paths of the files written."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            # Decode-only tokenizer: there is no merges table to persist.
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 306
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the checkpoint -> hosted config file map.
# NOTE(review): both assignments use the same name `__UpperCAmelCase`, so the map
# shadows the logger — distinct names (e.g. `logger`) were presumably intended.
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    Stores hyperparameters such as vocabulary size, context length, hidden
    size and dropout rates; unknown keyword arguments are forwarded to
    :class:`PretrainedConfig`.
    """

    model_type = "openai-gpt"
    # Maps the generic config attribute names onto GPT's historical names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.0_2,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn  # activation function name
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 65
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> names it exports. Optional
# backends (sentencepiece, torch) only register their submodules when present.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 391
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Helper that builds image-processor kwargs and synthetic image inputs
    for the ChineseCLIP image-processing tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images (default), numpy
        arrays (``numpify``) or torch tensors (``torchify``)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8))
        else:
            # Each image gets its own random (width, height).
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    # Test suite for ChineseCLIPImageProcessor with 3-channel inputs.
    # NOTE(review): the base class `__UpperCAmelCase` is not defined in this file —
    # presumably the imported `ImageProcessingSavingTestMixin` was intended; confirm.
    # NOTE(review): every method below shares the name `SCREAMING_SNAKE_CASE_`, so
    # only the last definition survives at class-creation time; the intended
    # `setUp`/`test_*` names appear to have been lost.
    lowerCamelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE_ ( self :int ):
        # NOTE(review): `ChineseCLIPImageProcessingTester` is not defined in this
        # file; the tester class above is the likely target. `_lowerCamelCase` is
        # also unbound here — verify the intended argument.
        __SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPImageProcessingTester(self , do_center_crop=_lowerCamelCase )

    @property
    def SCREAMING_SNAKE_CASE_ ( self :str ):
        # Image-processor kwargs built by the tester.
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE_ ( self :Any ):
        # The processor must expose all configuration attributes.
        __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''center_crop''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_convert_rgb''' ) )

    def SCREAMING_SNAKE_CASE_ ( self :Any ):
        # `from_dict` should honor both stored values and overrides.
        __SCREAMING_SNAKE_CASE : int = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 2_2_4, '''width''': 2_2_4} )
        self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
        __SCREAMING_SNAKE_CASE : int = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
        self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )

    def SCREAMING_SNAKE_CASE_ ( self :Any ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
        # Initialize image_processing
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __SCREAMING_SNAKE_CASE : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , Image.Image )
        # Test not batched input
        __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __SCREAMING_SNAKE_CASE : List[str] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
        # Initialize image_processing
        __SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __SCREAMING_SNAKE_CASE : int = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , np.ndarray )
        # Test not batched input
        __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __SCREAMING_SNAKE_CASE : Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
        # Initialize image_processing
        __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , torch.Tensor )
        # Test not batched input
        __SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __SCREAMING_SNAKE_CASE : Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
@require_torch
@require_vision
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
    # Test suite for ChineseCLIPImageProcessor with 4-channel inputs that are
    # converted to RGB (3 channels) by the processor.
    # NOTE(review): same issues as the class above — `__UpperCAmelCase` base and
    # `ChineseCLIPImageProcessingTester` are undefined in this file, and all
    # methods share one mangled name; confirm the intended identifiers.
    lowerCamelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE_ ( self :Dict ):
        __SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_lowerCamelCase )
        # Conversion to RGB should always yield 3 output channels.
        __SCREAMING_SNAKE_CASE : Optional[Any] = 3

    @property
    def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
        # The processor must expose all configuration attributes.
        __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''center_crop''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_convert_rgb''' ) )

    def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
        pass

    def SCREAMING_SNAKE_CASE_ ( self :Dict ):
        # Initialize image_processing
        __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , Image.Image )
        # Test not batched input
        __SCREAMING_SNAKE_CASE : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 716
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclidean algorithm.

    Returns ``(x, y)`` such that ``a*x + b*y == gcd(a, b)``.

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Chinese Remainder Theorem via the extended Euclidean algorithm.

    Returns the smallest non-negative ``x`` with ``x % n_1 == r_1`` and
    ``x % n_2 == r_2``; ``n_1`` and ``n_2`` must be coprime.
    """
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    # Combine the two residues using the Bezout coefficients of n_1 and n_2.
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    ``a`` and ``n`` must be coprime; the result ``b`` satisfies
    ``(a * b) % n == 1``.
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        # Normalize the Bezout coefficient into the range [0, n).
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """Chinese Remainder Theorem using modular inverses (same contract as
    ``chinese_remainder_theorem``, different construction)."""
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m
if __name__ == "__main__":
    # Run the doctests of each public function in this module.
    from doctest import testmod

    testmod(name='''chinese_remainder_theorem''', verbose=True)
    testmod(name='''chinese_remainder_theorem2''', verbose=True)
    testmod(name='''invert_modulo''', verbose=True)
    testmod(name='''extended_euclid''', verbose=True)
| 401
| 0
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """Tests for `DDPMScheduler`: config sweeps, variance values, full
    denoising loops, and custom-timestep validation."""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        # Baseline config; individual tests override fields via kwargs.
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference variance values for the linear beta schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        # `previous_timestep` must walk the custom schedule, ending at -1.
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 3
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module-level logger and the checkpoint -> hosted config file map.
# NOTE(review): both assignments use the same name `UpperCAmelCase__`, so the
# URL map shadows the logger — distinct names (e.g. `logger`) were presumably
# intended.
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration for UMT5 encoder-decoder models.

    Validates the `feed_forward_proj` spec ("ACT" or "gated-ACT") and exposes
    T5-style aliases (`hidden_size`, `num_attention_heads`, `num_hidden_layers`).
    """

    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse "ACT" or "gated-ACT" into the dense activation + gating flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (seq2seq with optional past)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached keys/values only the newest decoder token is fed.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self):
        return 13

    @property
    def atol_for_validation(self):
        return 5E-4
| 48
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """Dataset reader for plain-text files, backed by the packaged `Text`
    builder; supports both map-style and streaming datasets."""

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        # Normalize a bare path (or list of paths) into a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        """Build and return the dataset for `self.split` (streaming or
        map-style, depending on how the reader was configured)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| 703
|
def A__ ( nfactor : int , moles : float , volume : float ) ->float:
    """Convert molarity to normality: (moles / volume) * n-factor, rounded.

    NOTE(review): the original signature declared three parameters all named
    ``__A`` (a SyntaxError) while the body used these names; the four sibling
    functions in this file also all share the name ``A__`` and shadow each other.
    """
    return round(float(moles / volume) * nfactor)
def A__ ( volume : float , moles : float , temperature : float ) ->float:
    """Ideal-gas pressure: (n * R * T) / V with R = 0.0821 L·atm/(mol·K), rounded.

    NOTE(review): original parameters were all named ``__A`` (SyntaxError); names
    restored from the body.
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def A__ ( pressure : float , moles : float , temperature : float ) ->float:
    """Ideal-gas volume: (n * R * T) / P with R = 0.0821 L·atm/(mol·K), rounded.

    NOTE(review): original parameters were all named ``__A`` (SyntaxError); names
    restored from the body.
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def A__ ( pressure : float , moles : float , volume : float ) ->float:
    """Ideal-gas temperature: (P * V) / (n * R) with R = 0.0821 L·atm/(mol·K), rounded.

    NOTE(review): original parameters were all named ``__A`` (SyntaxError); names
    restored from the body.
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 516
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
# Sample data for the Banker's-algorithm class below.
# NOTE(review): all three constants share the mangled name `_lowerCAmelCase`,
# so only the last binding survives at module level — they presumably had
# distinct names (claim vector / allocated table / maximum-claim table).
# Total units of each of the 4 resource types.
_lowerCAmelCase :Dict = [8, 5, 9, 7]
# Resources currently allocated to each process (rows = processes, cols = resources).
_lowerCAmelCase :Any = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
# Maximum claim of each process (rows = processes, cols = resources).
_lowerCAmelCase :Tuple = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , A , A , A , ) -> None:
_UpperCAmelCase : Any = claim_vector
_UpperCAmelCase : Tuple = allocated_resources_table
_UpperCAmelCase : List[Any] = maximum_claim_table
def __lowerCAmelCase ( self ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __lowerCAmelCase ( self ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __lowerCAmelCase ( self ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(A ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __lowerCAmelCase ( self ) -> dict[int, list[int]]:
return {self.__need().index(A ): i for i in self.__need()}
def __lowerCAmelCase ( self , **A ) -> None:
_UpperCAmelCase : Union[str, Any] = self.__need()
_UpperCAmelCase : Dict = self.__allocated_resources_table
_UpperCAmelCase : Any = self.__available_resources()
_UpperCAmelCase : List[str] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 5_0 + '''\n''' )
while need_list:
_UpperCAmelCase : Optional[Any] = False
for each_need in need_list:
_UpperCAmelCase : List[Any] = True
for index, need in enumerate(A ):
if need > available_resources[index]:
_UpperCAmelCase : int = False
break
if execution:
_UpperCAmelCase : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_UpperCAmelCase : Optional[int] = original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(A )
# update available/freed resources stack
_UpperCAmelCase : Optional[int] = np.array(A ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(A ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def __lowerCAmelCase ( self ) -> str:
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(A ) + 1}'
+ ''' '''.join(f'{it:>8}' for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(A ) + 1}'
+ ''' '''.join(f'{it:>8}' for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(A ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(A ) for x in self.__available_resources() ) )
time.sleep(1 )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :Any = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for the LXMERT cross-modal (language + vision) transformer.

    NOTE(review): base class was the undefined name ``a``; ``PretrainedConfig``
    (imported above) restored. Parameter names were all mangled to ``A``
    (a SyntaxError); restored from the attribute assignments in the body.
    """

    model_type = '''lxmert'''
    # presumably the (empty) PretrainedConfig attribute map — TODO confirm
    attribute_map = {}

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_attention_heads=1_2,
        num_qa_labels=9_5_0_0,
        num_object_labels=1_6_0_0,
        num_attr_labels=4_0_0,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_0_4_8,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ) -> List[Any]:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Layer counts per encoder branch, exposed as one dict.
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs)
| 506
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowercase ( PretrainedConfig ):
    """Configuration for the DPT (Dense Prediction Transformer) model.

    NOTE(review): base class was the undefined name ``__UpperCAmelCase``;
    ``PretrainedConfig`` (imported above) restored. Parameter names and
    ``self.*`` targets were mangled; restored from the intact attribute
    references in the method bodies.
    """

    # Referenced by the dict-export method below via self.__class__.model_type.
    model_type = """dpt"""

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=3_84,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 1_92, 3_84, 7_68],
        fusion_hidden_size=2_56,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=2_55,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 10_24, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('''Initializing the config with a `BiT` backbone.''')
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""")
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def a_ ( self : Any ):
        """Serialize this config to a plain dict, expanding the nested backbone config.

        NOTE(review): mangled method name — presumably this should be ``to_dict``.
        """
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 361
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class lowercase ( PretrainedConfig ):
    """Configuration for the LUKE (entity-aware language) model.

    NOTE(review): base class was the undefined name ``__UpperCAmelCase``;
    ``PretrainedConfig`` (imported above) restored. Parameter names were all
    mangled to ``_lowerCamelCase`` (a SyntaxError); restored from the attribute
    assignments in the body.
    """

    model_type = """luke"""

    def __init__(
        self,
        vocab_size=5_02_67,
        entity_vocab_size=50_00_00,
        hidden_size=7_68,
        entity_emb_size=2_56,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 361
| 1
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
    """Unit tests for ``DDPMScheduler``: config sweeps, variance values, full
    denoising loops, and custom-timestep validation.

    NOTE(review): the base class was the undefined name ``lowercase_``
    (``SchedulerCommonTest`` is the only import matching); every method carried
    the same mangled name ``snake_case__`` (so they shadowed each other) and
    locals were undefined — names restored from the intact call sites
    (``self.get_scheduler_config``, ``self.scheduler_classes``).
    """

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM scheduler config, with overrides from kwargs."""
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        """Check _get_variance at the first, a middle, and the last timestep."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00_979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5

    def test_full_loop_no_noise(self):
        """Run a full deterministic denoising loop and pin the output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9_606) < 1E-2
        assert abs(result_mean.item() - 0.3_372) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """Same full loop, but with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0_296) < 1E-2
        assert abs(result_mean.item() - 0.2_631) < 1E-3

    def test_custom_timesteps(self):
        """previous_timestep must walk the custom schedule and end at -1."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 432
|
'''simple docstring'''
def _UpperCamelCase ( lowerCAmelCase__: int ,lowerCAmelCase__: bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE_ = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
SCREAMING_SNAKE_CASE_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(lowerCAmelCase__ ,1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE_ = primes[:idx]
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE_ = False
for r in range(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = pow(lowerCAmelCase__ ,d * 2**r ,lowerCAmelCase__ )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE_ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _UpperCamelCase ( ) -> None:
"""Smoke-test the Miller-Rabin implementation on composites/primes around each bound.

NOTE(review): the name ``miller_rabin`` used below is undefined in this file —
the primality function above was mangled to ``_UpperCamelCase``, the same name
as this test function, so it cannot be referenced from here without renaming
one of them. Likewise the ``test_miller_rabin`` called in the guard below is
undefined. These calls will raise NameError as written.
"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
# NOTE(review): ``test_miller_rabin`` is undefined here (see note above).
if __name__ == "__main__":
test_miller_rabin()
| 294
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class A__ ( ProcessorMixin ):
    """Processor bundling a SpeechT5 feature extractor and tokenizer.

    Audio goes through the feature extractor, text through the tokenizer;
    targets are attached to the encoded inputs as ``labels`` (plus
    ``decoder_attention_mask`` when available).

    NOTE(review): base class was the undefined name ``_lowerCamelCase``
    (``ProcessorMixin`` is the only import matching); method names and locals
    were mangled — restored from the intact references in the bodies.
    """

    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Encode inputs and/or targets.

        Exactly one of ``audio``/``text`` may be given as input, and one of
        ``audio_target``/``text_target`` as target; at least one of the four
        must be present.
        """
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None

        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        """Pad ``input_values`` (audio) or ``input_ids`` (text) and/or ``labels``.

        Label padding is routed to the tokenizer when the labels look like
        token ids, otherwise to the feature extractor (temporarily swapping its
        feature size for the mel-bin count so spectrogram labels pad correctly).
        """
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                # Pad spectrogram labels with the feature extractor, temporarily
                # pretending its feature size is the number of mel bins.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None

        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
| 709
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class A__ :
    """Weighted directed graph used as a Markov chain.

    ``connections`` maps node -> {destination: transition probability}.

    NOTE(review): attribute/parameter/method names were mangled (duplicate
    parameter names were a SyntaxError); restored from the intact call sites
    (``self.add_node``, ``graph.add_transition_probability`` etc. below).
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node):
        """Register a node with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, nodea, nodeb, probability):
        """Add (or overwrite) the transition nodea -> nodeb, creating nodes as needed."""
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodeb not in self.connections:
            self.add_node(nodeb)
        self.connections[nodea][nodeb] = probability

    def get_nodes(self):
        """Return all known nodes."""
        return list(self.connections)

    def transition(self, node):
        """Sample the next node from ``node``'s outgoing probabilities.

        Returns "" when the cumulative probability never exceeds the sampled
        value (e.g. probabilities that do not sum to 1).
        """
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def __lowerCAmelCase (start, transitions, steps):
    """Run a ``steps``-long random walk from ``start`` over the given transitions.

    ``transitions`` is an iterable of (node1, node2, probability) triples.
    Returns a Counter of how many times each node was visited (every node
    starts with a count of 1 from the initial Counter over all nodes).

    NOTE(review): the original referenced the undefined class name
    ``MarkovChainGraphUndirectedUnweighted``; the graph class defined above in
    this file is ``A__``, whose methods match the calls here. The original's
    three parameters also shared one mangled name (a SyntaxError).
    """
    graph = A__()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea, nodeb, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__A = get_tests_dir("fixtures")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Image-processor loading utilities: offline-cache fallback, URL loading,
    and subfolder resolution.

    NOTE(review): locals/arguments were mangled to undefined names
    (``UpperCAmelCase_`` etc.) and all methods shared one name; restored so the
    mock response is actually configured and used.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        """With the network mocked to return HTTP 500, loading must fall back to the cache."""
        # A mock response behaving like a 500 server error.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        image_processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            image_processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        """Loading a processor directly from a raw config URL must work."""
        image_processor = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")

    def test_image_processor_from_pretrained_subfolder(self):
        """Config in a subfolder must require the subfolder argument."""
        # NOTE(review): the expected exception type was mangled away; OSError is
        # what from_pretrained raises for a missing config — confirm upstream.
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(config)
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: str =TOKEN
HfFolder.save_token(UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any]) ->Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =ViTImageProcessor.from_pretrained(UpperCAmelCase_)
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token)
lowerCamelCase__: str =ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCAmelCase_ , repo_id="test-image-processor" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
lowerCamelCase__: int =ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Any =ViTImageProcessor.from_pretrained(UpperCAmelCase_)
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token)
lowerCamelCase__: int =ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCAmelCase_ , repo_id="valid_org/test-image-processor-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
lowerCamelCase__: Tuple =ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
        """Push a custom image processor registered for auto-class resolution and
        reload it dynamically with ``trust_remote_code``."""
        CustomImageProcessor.register_for_auto_class()
        lowerCamelCase__: Any =CustomImageProcessor.from_pretrained(UpperCAmelCase_)
        image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
        lowerCamelCase__: str =AutoImageProcessor.from_pretrained(
            F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=UpperCAmelCase_)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor")
| 59
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
    """Feature type for translations with a fixed set of languages per example.

    Each example is a dict mapping a language code to the translated string; the
    Arrow representation is a struct with one string field per (sorted) language.
    """

    # NOTE(review): all field names were obfuscated to ``a__`` (later fields shadow
    # earlier ones) and methods read ``self.languages`` — confirm field names
    # upstream. ``lowerCAmelCase`` in the last field is also unresolved in this
    # view (presumably ``False`` for init/repr).
    a__: List[str]
    a__: Optional[str] = None
    # Automatically constructed
    a__: ClassVar[str] = "dict"
    a__: ClassVar[Any] = None
    a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )

    def __call__( self ):
        # Arrow schema: one string column per sorted language code.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def UpperCAmelCase__ ( self ):
        """Flatten into per-language string ``Value`` features."""
        from .features import Value

        return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
    """Feature type for translations whose language set varies per example.

    Encoded as parallel sorted lists: ``{"language": [...], "translation": [...]}``.
    """

    # NOTE(review): field names obfuscated to ``a__`` (shadowing); methods read
    # ``self.languages`` — confirm names upstream. ``lowerCAmelCase`` is unresolved
    # in this view (presumably ``False``).
    a__: Optional[List] = None
    a__: Optional[int] = None
    a__: Optional[str] = None
    # Automatically constructed
    a__: ClassVar[str] = "dict"
    a__: ClassVar[Any] = None
    a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )

    def UpperCAmelCase__ ( self ):
        # NOTE(review): both results are bound to a local and immediately discarded
        # — upstream this is a __post_init__ that sets self.languages and
        # self.num_languages; confirm before relying on either attribute.
        lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
        lowerCamelCase_ = len(self.languages ) if self.languages else None

    def __call__( self ):
        # Arrow schema: parallel lists of language codes and translated strings.
        return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )

    def UpperCAmelCase__ ( self , UpperCAmelCase ):
        """Normalize a ``{lang: text-or-list-of-texts}`` dict into sorted parallel
        lists, raising ValueError for languages outside the declared set."""
        lowerCamelCase_ = set(self.languages )
        if self.languages and set(UpperCAmelCase ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        lowerCamelCase_ = []
        for lang, text in translation_dict.items():
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
        return {"language": languages, "translation": translations}

    def UpperCAmelCase__ ( self ):
        """Flatten into Sequence-of-string features."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('''string''' ) ),
            "translation": Sequence(Value('''string''' ) ),
        }
| 29
| 0
|
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) exactly using the multiplicative formula.

    Restored name: the original file defined four functions under one obfuscated
    name (each shadowing the previous) while the code below calls them by these
    names.
    """
    result = 1
    # Since C(n, k) = C(n, n - k), use the smaller k for fewer iterations.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k): multiply then divide each step so result stays integral.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1).

    This is the number of binary search trees (tree shapes) on n nodes.
    """
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for n >= 0.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of labelled binary trees on node_count nodes:
    Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
        f"""binary trees and {catalan_number(node_count)} binary search trees."""
    )
| 721
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map of submodule -> public names, consumed by `_LazyModule` below so that the
# heavy optional backends (torch / tf / flax / sentencepiece) are imported only
# on first attribute access. The original file overwrote one variable with each
# assignment and then referenced `_import_structure`, which was never defined.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the lazy structure with real imports so static type checkers see them.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 547
| 0
|
import requests
_A : str = """YOUR API KEY"""
def get_gifs(query: str, api_key: str = _A) -> list:
    """Return the GIF URLs Giphy finds for *query*.

    Restored name (the guard below already calls ``get_gifs``); the original def
    also had two parameters with the same name (a SyntaxError) and iterated an
    undefined ``gifs`` variable.

    Args:
        query: free-text search terms.
        api_key: Giphy API key; defaults to the module-level ``_A`` placeholder.
    """
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    # "data" is the result list in Giphy's search response.
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 100
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
    """Processor bundling a ``BridgeTowerImageProcessor`` and a Roberta tokenizer
    into one callable returning a joint text+image ``BatchEncoding``."""

    SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
    SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
    SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
        # (image_processor, tokenizer) forwarded to ProcessorMixin.
        super().__init__(UpperCamelCase__ , UpperCamelCase__ )

    def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
        """Tokenize the text and encode the images, merging both into one encoding.

        NOTE(review): parameter names are obfuscated; ``do_normalize``/
        ``do_center_crop`` below are fed from the same obfuscated name as other
        args — confirm the intended kwargs against the upstream processor.
        """
        A = self.tokenizer(
            text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
        # add pixel_values + pixel_mask
        A = self.image_processor(
            UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
        encoding.update(UpperCamelCase__ )
        return encoding

    def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @property
    def UpperCamelCase ( self : Any ):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        A = self.tokenizer.model_input_names
        A = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699
| 0
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """Parse ``TensorFlowBenchmarkArguments`` from the CLI and run the benchmark.

    Restored name (the guard below calls ``main()``) and restored control flow:
    argument parsing belongs inside the try so deprecated ``--no_*`` flags are
    translated into a helpful error instead of crashing twice.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # SECURITY NOTE: `eval` on argparse's error text. Input is a local CLI
        # invocation (trusted), but ast.literal_eval would be safer upstream.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
| 710
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SCREAMING_SNAKE_CASE__ (Pipeline ):
    """Zero-shot image classification pipeline.

    Scores an image against caller-supplied candidate labels using a CLIP-style
    dual encoder (``logits_per_image``). Restored the four ``Pipeline`` hook
    names (the originals all shared one obfuscated name and shadowed each
    other), the ``Pipeline`` base / ``PIPELINE_INIT_ARGS`` decorator argument
    (both imported above), the ``UserDict`` check, and the broken sort lambda.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        # Restrict to models that produce image/text similarity logits.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify ``images`` against ``candidate_labels`` (passed via kwargs)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to the preprocess stage; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One text prompt per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        # Highest score first. (Original lambda named its parameter differently
        # from the body, which raised NameError at runtime.)
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 329
| 0
|
"""simple docstring"""
import math
class Graph:
    """Dense weighted digraph with all-pairs shortest paths via Floyd-Warshall.

    Restored names: the demo below already calls ``Graph``, ``add_edge``,
    ``floyd_warshall`` and ``show_min``, and the original methods assigned their
    results to discarded locals instead of instance state.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j; 0 on the diagonal
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        for i in range(0, n):
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        """Add (or overwrite) a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the minimum distance from u to v (run floyd_warshall first)."""
        return self.dp[u][v]


if __name__ == "__main__":
    # Demo: build a 5-node weighted digraph, solve it, query two pairs.
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
| 223
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ : Union[str, Any] = 1_6
UpperCAmelCase__ : str = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Restored name (called as ``get_dataloaders`` in training_function) and
    restored parameters: the original def reused one name for both parameters,
    which is a SyntaxError, and read an unbound ``accelerator``.

    Args:
        accelerator: active ``Accelerator`` — used for process ordering and
            TPU / mixed-precision padding decisions.
        batch_size: per-device batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name
    # for labels by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only: when the CI flag is set, replace the real dataloader factory
# with a lightweight mock so tests never download GLUE. (The original assigned
# the mock to a throwaway name instead of rebinding `get_dataloaders`.)
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on GLUE/MRPC with Accelerate + LocalSGD.

    Restored name (``main`` calls ``training_function(config, args)``) and
    restored parameters: the original def reused one name for both parameters
    (a SyntaxError) and discarded every intermediate result.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI namespace (mixed_precision, cpu,
            gradient_accumulation_steps, local_sgd_steps).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also
    # controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects
    # in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # We use the `accumulate` context manager to perform gradient accumulation.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI args and launch ``training_function`` with the default MRPC
    hyper-parameters. (Restored name: the guard below calls ``main()``.)"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 223
| 1
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__lowerCAmelCase : Any = ["gpt2"]
__lowerCAmelCase : Union[str, Any] = "gpt2"
if is_tf_available():
class ModelToSave(tf.Module):
    """Bundle a TF GPT-2 LM and its in-graph tokenizer into one ``tf.Module`` so
    the pair exports as a single SavedModel with a string-input signature.

    Restored names: the test class below instantiates ``ModelToSave(tokenizer=...)``
    and calls ``model.serving(...)``; the original class was named ``A`` with an
    obfuscated positional parameter and method name.
    """

    def __init__(self, tokenizer):
        super().__init__()
        self.tokenizer = tokenizer
        # NOTE(review): the tiny checkpoint constant in this module is obfuscated
        # (and would be name-mangled inside a class body); "gpt2" matches the
        # module-level checkpoint string — confirm against upstream.
        config = AutoConfig.from_pretrained("gpt2")
        self.model = TFGPTaLMHeadModel.from_config(config)

    @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text'''),))
    def serving(self, text):
        """Tokenize raw strings in-graph and return the LM logits."""
        tokenized = self.tokenizer(text)
        input_ids_dense = tokenized['''input_ids'''].to_tensor()
        # Mask out padding positions (ragged rows were densified with zeros).
        # Original had the non-existent `tf.intaa` (obfuscated int64).
        input_mask = tf.cast(input_ids_dense > 0, tf.int64)
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['''logits''']
        return outputs
@require_tf
@require_keras_nlp
class A ( unittest.TestCase ):
    """Checks the in-graph ``TFGPTaTokenizer`` against the Python ``GPTaTokenizer``
    and through tf.function compilation, SavedModel export, config round-trips,
    and max_length handling.

    NOTE(review): method names were obfuscated to ``snake_case__`` (they shadow
    each other and lack the ``test_`` prefix, so unittest will not discover
    them), setUp reads ``__a`` which is unbound there, and ``ModelToSave`` /
    ``TOKENIZER_CHECKPOINTS`` depend on obfuscated module constants — confirm
    against the upstream test file.
    """

    def snake_case__ ( self : int ) -> Dict:
        # Build one Python and one TF tokenizer per checkpoint, plus mixed-script
        # sentences that exercise byte-level BPE edge cases.
        super().setUp()
        __UpperCAmelCase = [GPTaTokenizer.from_pretrained(__a ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __UpperCAmelCase = [TFGPTaTokenizer.from_pretrained(__a ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        __UpperCAmelCase = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        __UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def snake_case__ ( self : Optional[Any] ) -> List[Any]:
        """Eager parity: TF tokenizer output must equal the Python tokenizer's."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __UpperCAmelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
                __UpperCAmelCase = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __UpperCAmelCase = python_outputs[key].numpy()
                    __UpperCAmelCase = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(__a , tf.intaa ) == tf_outputs_values ) )

    @slow
    def snake_case__ ( self : int ) -> List[Any]:
        """tf.function-compiled tokenizer must match the eager tokenizer."""
        for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase = tf.function(__a )
            for test_inputs in self.test_sentences:
                __UpperCAmelCase = tf.constant(__a )
                __UpperCAmelCase = compiled_tokenizer(__a )
                __UpperCAmelCase = tf_tokenizer(__a )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def snake_case__ ( self : Tuple ) -> Optional[Any]:
        """SavedModel round-trip: exported serving signature must reproduce logits."""
        for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase = ModelToSave(tokenizer=__a )
            __UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
            __UpperCAmelCase = model.serving(__a )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                __UpperCAmelCase = Path(__a ) / '''saved.model'''
                tf.saved_model.save(__a , __a , signatures={'''serving_default''': model.serving} )
                __UpperCAmelCase = tf.saved_model.load(__a )
                __UpperCAmelCase = loaded_model.signatures['''serving_default'''](__a )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def snake_case__ ( self : Optional[int] ) -> Dict:
        """get_config / from_config round-trip must preserve behavior."""
        for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
            __UpperCAmelCase = tf_tokenizer(__a )  # Build model with some sample inputs
            __UpperCAmelCase = tf_tokenizer.get_config()
            __UpperCAmelCase = TFGPTaTokenizer.from_config(__a )
            __UpperCAmelCase = model_from_config(__a )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def snake_case__ ( self : Dict ) -> List[str]:
        """max_length must cap the produced sequence length."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __UpperCAmelCase = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                __UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
                __UpperCAmelCase = tf_tokenizer(__a , max_length=__a )
                __UpperCAmelCase = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 705
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the falling product used by
    Newton's forward-difference interpolation.

    Restored name: ``main`` below calls ``ucal`` (the original file gave both
    functions the same obfuscated name, the second shadowing the first).
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """Read sample points interactively and print the Newton forward-difference
    interpolation at a requested value. (Restored name: the guard calls ``main``;
    the original body assigned every result to discarded locals.)"""
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n):
        y.append([] )
    # Initialize an n x n forward-difference table with zeros.
    for i in range(n):
        for j in range(n):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int, input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    # Normalized offset from the first sample (assumes equally spaced x).
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"""the value at {value} is {summ}""" )


if __name__ == "__main__":
    main()
| 654
| 0
|
'''simple docstring'''
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _UpperCamelCase ( ) -> None:
    """Exercise the OR gate over all four input combinations.

    NOTE(review): ``or_gate`` is not defined in this file (the gate above is
    itself named ``_UpperCamelCase``, which this definition shadows) — these
    asserts raise NameError as written; identifiers look scrambled, verify.
    """
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Demo: print the gate's output for each input combination.
    # NOTE(review): `or_gate` is not defined in this file — NameError as
    # written; the gate above is named `_UpperCamelCase`. Verify naming.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 638
|
'''simple docstring'''
import numpy as np
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 638
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A , A , A , A , A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
)
def __a ( ) -> None:
    """Demo driver: print the minimax value of a sample 8-leaf game tree.

    NOTE(review): both locals are assigned to the same name ``A__`` and the
    call reads the unbound names ``A`` and ``minimax`` (the solver above is
    itself named ``__a``, which this definition shadows) — identifiers look
    scrambled; verify before running.
    """
    A__ = [90, 23, 6, 33, 21, 65, 123, 34_423]
    A__ = math.log(len(A ) , 2 )
    print(f"""Optimal value : {minimax(0 , 0 , A , A , A )}""" )
if __name__ == "__main__":
    # Run doctests, then the demo driver.
    # NOTE(review): `main` is not defined in this file (the driver above is
    # named `__a`) — NameError as written; verify naming.
    import doctest
    doctest.testmod()
    main()
| 261
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
    """Placeholder that raises a helpful error when torch/transformers/onnx
    are not installed.

    Fix: ``__init__`` and both classmethods declared ``*args`` and ``**kwargs``
    with the same name (``UpperCamelCase__``), which is a SyntaxError.
    """

    # Backends this placeholder stands in for.
    lowercase__ : Union[str, Any] = ["""torch""", """transformers""", """onnx"""]

    def __init__( self , *args , **kwargs ):
        # Fail fast with an informative ImportError naming the missing backends.
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        # NOTE(review): two classmethods share the name `lowercase_`; the second
        # binding overrides the first (originals were presumably from_config /
        # from_pretrained) — verify naming.
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
    """Placeholder raising a helpful error when torch/transformers/onnx are absent.

    Fix: ``*args`` and ``**kwargs`` were both named ``UpperCamelCase__``
    (duplicate argument name — SyntaxError).
    """

    # Backends this placeholder stands in for.
    lowercase__ : Dict = ["""torch""", """transformers""", """onnx"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        # NOTE(review): duplicate classmethod name `lowercase_` — second
        # binding overrides the first; verify intended names.
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
    """Placeholder raising a helpful error when torch/transformers/onnx are absent.

    Fix: ``*args`` and ``**kwargs`` were both named ``UpperCamelCase__``
    (duplicate argument name — SyntaxError).
    """

    # Backends this placeholder stands in for.
    lowercase__ : List[str] = ["""torch""", """transformers""", """onnx"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        # NOTE(review): duplicate classmethod name `lowercase_` — second
        # binding overrides the first; verify intended names.
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
    """Placeholder raising a helpful error when torch/transformers/onnx are absent.

    Fix: ``*args`` and ``**kwargs`` were both named ``UpperCamelCase__``
    (duplicate argument name — SyntaxError).
    """

    # Backends this placeholder stands in for.
    lowercase__ : str = ["""torch""", """transformers""", """onnx"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        # NOTE(review): duplicate classmethod name `lowercase_` — second
        # binding overrides the first; verify intended names.
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
    """Placeholder raising a helpful error when torch/transformers/onnx are absent.

    Fix: ``*args`` and ``**kwargs`` were both named ``UpperCamelCase__``
    (duplicate argument name — SyntaxError).
    """

    # Backends this placeholder stands in for.
    lowercase__ : int = ["""torch""", """transformers""", """onnx"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        # NOTE(review): duplicate classmethod name `lowercase_` — second
        # binding overrides the first; verify intended names.
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
    """Placeholder raising a helpful error when torch/transformers/onnx are absent.

    Fix: ``*args`` and ``**kwargs`` were both named ``UpperCamelCase__``
    (duplicate argument name — SyntaxError).
    """

    # Backends this placeholder stands in for.
    lowercase__ : Dict = ["""torch""", """transformers""", """onnx"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        # NOTE(review): duplicate classmethod name `lowercase_` — second
        # binding overrides the first; verify intended names.
        requires_backends(cls , ["torch", "transformers", "onnx"] )

    @classmethod
    def lowercase_ ( cls , *args , **kwargs ):
        requires_backends(cls , ["torch", "transformers", "onnx"] )
| 261
| 1
|
def a (lowerCAmelCase__ ):
    """Sort a list in place using odd-even (brick) transposition sort and
    return it.

    Fixes: the original's swap unpacked both values into the same name
    (``__a , __a = ...``), so no swap ever happened and any unsorted input
    spun forever; the body also read the unbound name ``input_list`` while
    the parameter was ``lowerCAmelCase__``.

    :param lowerCAmelCase__: list of comparable items (mutated in place)
    :return: the same list, sorted ascending
    """
    input_list = lowerCAmelCase__
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    # Interactive demo: read integers on one line, sort them, print the result.
    # NOTE(review): `odd_even_sort`, `input_list` and `sorted_list` are not
    # defined as written (the function above is named `a` and both results are
    # assigned to `SCREAMING_SNAKE_CASE`) — identifiers look scrambled; verify.
    print('Enter list to be sorted')
    SCREAMING_SNAKE_CASE = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    SCREAMING_SNAKE_CASE = odd_even_sort(input_list)
    print('The sorted list is')
    print(sorted_list)
| 99
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase : List[Any] = ["""text""", """image""", """audio"""]
def _A ( A ) -> Dict:
lowercase : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(A ,A ):
inputs.append(create_inputs(A ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def _A ( outputs ) -> str:
    """Map each output object to its agent type name ("text", "image", "audio").

    Fixes: the parameter was named ``A`` while the body iterated the unbound
    name ``outputs``, and the accumulator was assigned to ``lowercase`` but
    appended to (and returned) as ``output_types`` — restored from the reads.

    :param outputs: iterable of tool outputs
    :return: list of type-name strings, one per output
    :raises ValueError: if an output matches none of the known types
    """
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(F'''Invalid output: {output}''' )
    return output_types
@is_tool_test
class _UpperCamelCase :
    """Generic test mixin for Tool subclasses; expects `self.tool` to be set
    by the concrete test case.

    NOTE(review): many free names in the method bodies (`a_`, `inputs`,
    `outputs`, `_inputs`, `create_inputs`, `output_types`,
    `authorized_types`) are unbound as written, and every local is assigned
    to the throwaway name `lowercase` — the identifiers look scrambled;
    restore them (the helpers above are both named `_A`) before relying on
    these tests.
    """

    def a__ ( self ) -> Optional[Any]:
        # Declared input/output types must all be authorized type names.
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        lowercase : Optional[Any] = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , a_ ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        lowercase : Any = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def a__ ( self ) -> Any:
        # Calling the tool on dummy inputs must yield the declared output types.
        lowercase : Any = create_inputs(self.tool.inputs )
        lowercase : Tuple = self.tool(*a_ )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            lowercase : Any = [outputs]
        self.assertListEqual(output_types(a_ ) , self.tool.outputs )

    def a__ ( self ) -> List[str]:
        # Every tool must expose a description and a default checkpoint.
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )

    def a__ ( self ) -> int:
        # Outputs must be instances of the mapped agent types.
        lowercase : str = create_inputs(self.tool.inputs )
        lowercase : str = self.tool(*a_ )
        if not isinstance(a_ , a_ ):
            lowercase : Union[str, Any] = [outputs]
        self.assertEqual(len(a_ ) , len(self.tool.outputs ) )
        for output, output_type in zip(a_ , self.tool.outputs ):
            lowercase : List[str] = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(a_ , a_ ) )

    def a__ ( self ) -> Optional[int]:
        # The tool must also accept agent-typed (wrapped) inputs without error.
        lowercase : int = create_inputs(self.tool.inputs )
        lowercase : str = []
        for _input, input_type in zip(a_ , self.tool.inputs ):
            if isinstance(a_ , a_ ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        lowercase : Optional[int] = self.tool(*a_ )
        if not isinstance(a_ , a_ ):
            lowercase : str = [outputs]
        self.assertEqual(len(a_ ) , len(self.tool.outputs ) )
| 372
| 0
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowerCamelCase : Optional[Any] = True
except ImportError:
_lowerCamelCase : str = False
_lowerCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( _UpperCAmelCase ):
    """Factory for the `add-new-model` CLI command (used as argparse `func`).

    NOTE(review): the body reads `args` but the parameter is named
    `_UpperCAmelCase`, and `AddNewModelCommand` is not defined here (the
    command class below is named `lowercase`) — identifiers look scrambled;
    verify before use.
    """
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowercase ( __UpperCAmelCase):
    """CLI command that scaffolds a new model from the cookiecutter templates
    and moves the generated files into the transformers source tree.

    NOTE(review): this block appears machine-scrambled:
    * `__init__` and the nested `replace` helper declare several parameters
      with the SAME name (`_lowerCamelCase`) — a SyntaxError in Python;
    * most assignments target throwaway names (`A_`) while later code reads
      the intended names (`path_to_transformer_root`, `model_dir`,
      `lowercase_model_name`, `configuration`, ...), which are unbound as
      written.
    Restore the original parameter/local names before shipping.
    """

    @staticmethod
    def a_ ( _lowerCamelCase : ArgumentParser ):
        """Register the `add-new-model` sub-parser and its flags."""
        # NOTE(review): body reads `parser` / `add_new_model_parser`, but only
        # `_lowerCamelCase` is bound — scrambled names.
        A_ : int = parser.add_parser('''add-new-model''' )
        add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
        add_new_model_parser.add_argument('''--testing_file''' , type=_lowerCamelCase , help='''Configuration file on which to run.''' )
        add_new_model_parser.add_argument(
            '''--path''' , type=_lowerCamelCase , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
        add_new_model_parser.set_defaults(func=_lowerCamelCase )

    def __init__( self : Any , _lowerCamelCase : bool , _lowerCamelCase : str , _lowerCamelCase : Tuple=None , *_lowerCamelCase : Tuple ):
        """Store testing mode, optional testing config file, and template path."""
        # NOTE(review): duplicate parameter names (SyntaxError) and assignments
        # that should target self._testing / self._testing_file / self._path.
        A_ : str = testing
        A_ : List[Any] = testing_file
        A_ : str = path

    def a_ ( self : Tuple ):
        """Run the command: drive cookiecutter, then move/patch generated files."""
        warnings.warn(
            '''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
            '''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
            '''checks, you should use `transformers-cli add-new-model-like` instead.''' )
        if not _has_cookiecutter:
            raise ImportError(
                '''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
                '''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        A_ : str = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
        if len(_lowerCamelCase ) > 0:
            raise ValueError(
                '''Several directories starting with `cookiecutter-template-` in current working directory. '''
                '''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
                '''change your working directory.''' )
        # Locate the repo root (four levels up from this file) unless a path was given.
        A_ : List[Any] = (
            Path(_lowerCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        A_ : Tuple = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(_lowerCamelCase ) )
        else:
            with open(self._testing_file , '''r''' ) as configuration_file:
                A_ : Dict = json.load(_lowerCamelCase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=_lowerCamelCase , extra_context=_lowerCamelCase , )
        A_ : Optional[Any] = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
            A_ : str = json.load(_lowerCamelCase )
        A_ : Any = configuration['''lowercase_modelname''']
        A_ : Dict = configuration['''generate_tensorflow_pytorch_and_flax''']
        os.remove(F"""{directory}/configuration.json""" )
        # Which frameworks were requested in the template config.
        A_ : Tuple = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
        A_ : Dict = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
        A_ : int = '''Flax''' in generate_tensorflow_pytorch_and_flax
        A_ : Optional[Any] = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
        os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=_lowerCamelCase )
        # Tests require submodules as they have parent imports
        with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
            pass
        shutil.move(
            F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
        shutil.move(
            F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )

        def remove_copy_lines(_lowerCamelCase : Optional[int] ):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(_lowerCamelCase , '''r''' ) as f:
                A_ : List[Any] = f.readlines()
            with open(_lowerCamelCase , '''w''' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(_lowerCamelCase )

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
                shutil.move(
                    F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
                shutil.move(
                    F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
            else:
                os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
                os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
                shutil.move(
                    F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
                shutil.move(
                    F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
            else:
                os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
                os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
        if output_flax:
            if not self._testing:
                remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
                shutil.move(
                    F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
                shutil.move(
                    F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
            else:
                os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
                os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
        shutil.move(
            F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
        shutil.move(
            F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
        shutil.move(
            F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : List[str] ):
            # Insert `lines_to_copy` below the marker line in the target file,
            # atomically via a temp file.
            # NOTE(review): duplicate parameter names (SyntaxError) — originals
            # were presumably (original_file, line_to_copy_below, lines_to_copy).
            # Create temp file
            A_ , A_ : Union[str, Any] = mkstemp()
            A_ : List[Any] = False
            with fdopen(_lowerCamelCase , '''w''' ) as new_file:
                with open(_lowerCamelCase ) as old_file:
                    for line in old_file:
                        new_file.write(_lowerCamelCase )
                        if line_to_copy_below in line:
                            A_ : Union[str, Any] = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(_lowerCamelCase )
            if not line_found:
                raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(_lowerCamelCase , _lowerCamelCase )
            # Remove original file
            remove(_lowerCamelCase )
            # Move new file
            move(_lowerCamelCase , _lowerCamelCase )

        def skip_units(_lowerCamelCase : str ):
            # A snippet is skipped when it targets a framework we didn't generate.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(_lowerCamelCase : Any ):
            # Parse the generated to_replace_*.py directive file and apply each
            # snippet to its target file, then delete the directive file.
            with open(_lowerCamelCase ) as datafile:
                A_ : Optional[Any] = []
                A_ : Optional[int] = False
                A_ : str = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        A_ : Dict = line.split('''"''' )[1]
                        A_ : str = skip_units(_lowerCamelCase )
                    elif "# Below: " in line and "##" not in line:
                        A_ : Optional[int] = line.split('''"''' )[1]
                        A_ : Any = skip_units(_lowerCamelCase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
                        A_ : int = []
                    elif "# Replace with" in line and "##" not in line:
                        A_ : Any = []
                    elif "##" not in line:
                        lines_to_copy.append(_lowerCamelCase )
            remove(_lowerCamelCase )

        replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(_lowerCamelCase )
| 361
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase : Any = False
class lowercase ( unittest.TestCase):
    """Fast (CPU) tests for VQDiffusionPipeline built from tiny dummy components.

    NOTE(review): every member is named `a_`, so later bindings override
    earlier ones at class-creation time (the originals were distinct
    properties like `num_embed`, `dummy_vqvae`, `dummy_tokenizer`, ... and
    test methods); several method bodies also read the unbound free name
    `_lowerCamelCase` and return names (`model`, `tokenizer`) that were
    assigned to `A_` — identifiers look scrambled; restore before relying on
    these tests.
    """

    def a_ ( self : Tuple ):
        # Free GPU/host memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def a_ ( self : Any ):
        # number of VQ embeddings used by the dummy models
        return 12

    @property
    def a_ ( self : List[str] ):
        # number of ada-norm embeddings for the transformer
        return 12

    @property
    def a_ ( self : List[Any] ):
        # hidden size of the dummy text embedder
        return 32

    @property
    def a_ ( self : Any ):
        # Tiny deterministic VQModel.
        torch.manual_seed(0 )
        A_ : Union[str, Any] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def a_ ( self : List[Any] ):
        # Tiny CLIP tokenizer from the testing hub.
        A_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def a_ ( self : int ):
        # Tiny deterministic CLIP text encoder.
        torch.manual_seed(0 )
        A_ : Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(_lowerCamelCase )

    @property
    def a_ ( self : Optional[int] ):
        # Tiny deterministic Transformer2DModel over the VQ codebook.
        torch.manual_seed(0 )
        A_ : Optional[Any] = 12
        A_ : Optional[int] = 12
        A_ : int = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        A_ : Tuple = TransformeraDModel(**_lowerCamelCase )
        return model

    def a_ ( self : Optional[int] ):
        # End-to-end pipeline run with non-learnable classifier-free embeddings;
        # checks image shape and a reference slice for both dict and tuple returns.
        A_ : Union[str, Any] = '''cpu'''
        A_ : Union[str, Any] = self.dummy_vqvae
        A_ : str = self.dummy_text_encoder
        A_ : List[Any] = self.dummy_tokenizer
        A_ : int = self.dummy_transformer
        A_ : Any = VQDiffusionScheduler(self.num_embed )
        A_ : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowerCamelCase )
        A_ : Dict = VQDiffusionPipeline(
            vqvae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , transformer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
        A_ : List[Any] = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        A_ : List[Any] = '''teddy bear playing in the pool'''
        A_ : List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        A_ : List[Any] = pipe([prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
        A_ : Any = output.images
        A_ : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        A_ : List[Any] = pipe(
            [prompt] , generator=_lowerCamelCase , output_type='''np''' , return_dict=_lowerCamelCase , num_inference_steps=2 )[0]
        A_ : Optional[int] = image[0, -3:, -3:, -1]
        A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        A_ : Optional[int] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def a_ ( self : List[Any] ):
        # Same run but with learnable classifier-free sampling embeddings.
        A_ : Union[str, Any] = '''cpu'''
        A_ : int = self.dummy_vqvae
        A_ : List[str] = self.dummy_text_encoder
        A_ : Optional[Any] = self.dummy_tokenizer
        A_ : Any = self.dummy_transformer
        A_ : Any = VQDiffusionScheduler(self.num_embed )
        A_ : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
            learnable=_lowerCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        A_ : int = VQDiffusionPipeline(
            vqvae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , transformer=_lowerCamelCase , scheduler=_lowerCamelCase , learned_classifier_free_sampling_embeddings=_lowerCamelCase , )
        A_ : List[Any] = pipe.to(_lowerCamelCase )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )
        A_ : Any = '''teddy bear playing in the pool'''
        A_ : str = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        A_ : Optional[Any] = pipe([prompt] , generator=_lowerCamelCase , num_inference_steps=2 , output_type='''np''' )
        A_ : Tuple = output.images
        A_ : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        A_ : List[str] = pipe(
            [prompt] , generator=_lowerCamelCase , output_type='''np''' , return_dict=_lowerCamelCase , num_inference_steps=2 )[0]
        A_ : Optional[int] = image[0, -3:, -3:, -1]
        A_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        A_ : str = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
    """Slow GPU integration test against the released microsoft/vq-diffusion-ithq
    checkpoint, compared to a stored reference image.

    NOTE(review): both methods are named `a_` (second binding overrides the
    first) and the bodies read the unbound free name `_lowerCamelCase` and
    `pipeline` — identifiers look scrambled; verify before running.
    """

    def a_ ( self : Any ):
        # Free GPU/host memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a_ ( self : str ):
        A_ : str = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        A_ : int = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        A_ : Tuple = pipeline.to(_lowerCamelCase )
        pipeline.set_progress_bar_config(disable=_lowerCamelCase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        A_ : Dict = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
        A_ : Union[str, Any] = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_lowerCamelCase , output_type='''np''' , )
        A_ : Optional[int] = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 361
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Model configuration for LayoutLMv3 (text + layout + image embeddings).

    NOTE(review): `__init__` declares ~30 parameters ALL named `snake_case__`
    — a SyntaxError in Python — and every attribute assignment targets the
    throwaway name `_lowerCAmelCase` instead of `self.<attr>`. The intended
    parameter names are readable from the right-hand sides below
    (`max_ad_position_embeddings`, `coordinate_size`, `shape_size`, ...);
    restore them before use.
    """

    # model_type key used by the AutoConfig machinery
    __magic_name__ = "layoutlmv3"

    def __init__( self , snake_case__=5_0265 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=1024 , snake_case__=128 , snake_case__=128 , snake_case__=True , snake_case__=32 , snake_case__=128 , snake_case__=64 , snake_case__=256 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=224 , snake_case__=3 , snake_case__=16 , snake_case__=None , **snake_case__ , ):
        """Initialise the base text-config fields, then the layout/visual extras."""
        super().__init__(
            vocab_size=snake_case__ , hidden_size=snake_case__ , num_hidden_layers=snake_case__ , num_attention_heads=snake_case__ , intermediate_size=snake_case__ , hidden_act=snake_case__ , hidden_dropout_prob=snake_case__ , attention_probs_dropout_prob=snake_case__ , max_position_embeddings=snake_case__ , type_vocab_size=snake_case__ , initializer_range=snake_case__ , layer_norm_eps=snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
        # Layout (2D position / relative attention) configuration.
        _lowerCAmelCase : List[Any] = max_ad_position_embeddings
        _lowerCAmelCase : List[str] = coordinate_size
        _lowerCAmelCase : int = shape_size
        _lowerCAmelCase : int = has_relative_attention_bias
        _lowerCAmelCase : Optional[Any] = rel_pos_bins
        _lowerCAmelCase : int = max_rel_pos
        _lowerCAmelCase : Optional[int] = has_spatial_attention_bias
        _lowerCAmelCase : Tuple = rel_ad_pos_bins
        _lowerCAmelCase : Optional[Any] = max_rel_ad_pos
        # Which embedding streams are active, plus visual-patch settings.
        _lowerCAmelCase : str = text_embed
        _lowerCAmelCase : Optional[int] = visual_embed
        _lowerCAmelCase : int = input_size
        _lowerCAmelCase : Dict = num_channels
        _lowerCAmelCase : Union[str, Any] = patch_size
        _lowerCAmelCase : Optional[int] = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export configuration for LayoutLMv3 (input spec, validation
    tolerance, opset, and dummy-input generation).

    NOTE(review): all four members are named `a`, so only the last binding
    survives on the class (originals were presumably `inputs` /
    `atol_for_validation` / `default_onnx_opset` / `generate_dummy_inputs`);
    the final method also declares eight parameters all named `snake_case__`
    — a SyntaxError — while its body reads `processor` / `batch_size` /
    `seq_length` etc. Restore the names before use.
    """

    # minimum torch version supported for export
    __magic_name__ = version.parse("1.12" )

    @property
    def a ( self ):
        """Ordered mapping of model input names to their dynamic-axis labels."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ] )
        else:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels'}),
                ] )

    @property
    def a ( self ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-5

    @property
    def a ( self ):
        """Default ONNX opset for this architecture."""
        return 12

    def a ( self , snake_case__ , snake_case__ = -1 , snake_case__ = -1 , snake_case__ = False , snake_case__ = None , snake_case__ = 3 , snake_case__ = 40 , snake_case__ = 40 , ):
        """Generate dummy (text, bbox, image) inputs via the processor for tracing."""
        setattr(processor.image_processor , 'apply_ocr' , snake_case__ )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowerCAmelCase : List[str] = compute_effective_axis_dimension(
            snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowerCAmelCase : int = processor.tokenizer.num_special_tokens_to_add(snake_case__ )
        _lowerCAmelCase : int = compute_effective_axis_dimension(
            snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
        # Generate dummy inputs according to compute batch and sequence
        _lowerCAmelCase : List[Any] = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        _lowerCAmelCase : Dict = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        _lowerCAmelCase : Optional[Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        _lowerCAmelCase : Optional[Any] = dict(
            processor(
                snake_case__ , text=snake_case__ , boxes=snake_case__ , return_tensors=snake_case__ , ) )
        return inputs
| 444
|
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCAmelCase : Optional[int] = datasets.logging.get_logger(__name__)
lowerCAmelCase : List[str] = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
lowerCAmelCase : List[Any] = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
lowerCAmelCase : str = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
lowerCAmelCase : Optional[Any] = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """BLEURT metric: scores candidate sentences against references with a learnt model."""

    def _info(self):
        # Schema consumed by the `datasets` metric machinery.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        """Resolve the checkpoint name (case-insensitively), download it and build the scorer."""
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        # `elif` is required here: without it the "default" branch would fall through
        # and raise KeyError because "default" is not a key of CHECKPOINT_URLS.
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        # BLEURT's API takes candidates/references; return one score per pair.
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}


# Backward-compat alias for the previous (mangled) class name.
UpperCamelCase__ = BLEURT
| 444
| 1
|
import argparse
import json
import os

# Fixed typo: the protobuf module is `saved_model_pb2`, not `saved_model_pba`.
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op used by a TF SavedModel is supported by the given ONNX opset.

    Args:
        saved_model_path: path to the `saved_model.pb` file to inspect.
        strict: if True, raise on incompatible ops instead of printing them.
        opset: highest ONNX opset version to accept ops from.

    Raises:
        Exception: when `strict` is True and incompatible ops were found.
    """
    saved_model = SavedModel()
    onnx_ops = []

    # Collect every op supported up to (and including) the requested opset.
    with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # Join the list into the message: the original concatenated str + list, a TypeError.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


# Backward-compat alias for the previous (mangled) function name.
_UpperCAmelCase = onnx_compliancy
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the compliancy check.
    parser = argparse.ArgumentParser()
    parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
    parser.add_argument(
        '--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
    )
    parser.add_argument(
        '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
    )
    parser.add_argument(
        '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 701
|
from __future__ import annotations
# Example undirected graph as an adjacency list, used by the demo code below
# (which references it by the name `graph`).
graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
class Graph:
    """Breadth-first search over an adjacency-list graph, recording a BFS tree
    from which shortest (fewest-edge) paths can be reconstructed."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run BFS from the source vertex, filling `self.parent`."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to `target_vertex` as 'A->B->C'.

        Raises:
            ValueError: if `target_vertex` is unreachable from the source.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"""->{target_vertex}"""


# Backward-compat alias for the previous (mangled) class name.
snake_case_ = Graph
if __name__ == "__main__":
    # Demo: BFS from 'G', then print two reachable paths; the final call uses an
    # unknown vertex and demonstrates the ValueError path.
    g = Graph(graph, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
| 510
| 0
|
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using deterministic 6k±1 trial division.

    Kept as an `assert` (not a raise) so invalid input still produces the
    AssertionError the unit tests below expect.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Backward-compat alias for the previous (mangled) function name.
_a = is_prime
class Test(unittest.TestCase):
    """Unit tests for `is_prime`.

    Renamed from the mangled `_a` (which shadowed the function of the same name),
    and the two methods are given distinct names so the first no longer gets
    silently overridden by the second.
    """

    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # `is_prime` validates input with `assert`, so a negative raises AssertionError.
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
# Run the unit tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 43
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time for this legacy module path
# (removal slated for diffusers 0.22.0); `standard_warn=False` keeps the custom
# message phrasing and `stacklevel=3` points the warning at the importing code.
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 475
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds tiny ConvNextV2 configs and dummy inputs for the model tests below.

    Restored class/method/attribute names: the mangled version lost every
    `self.*` assignment and gave all methods the same name; the names used
    here match the call sites in ConvNextVaModelTest.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ConvNextV2 model tests. Method and attribute names restored to the
    standard ModelTesterMixin contract (the mangled version named every test
    method identically, so only the last one survived)."""

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base models and backbones have no loss head to train against.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published convnextv2-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 579
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# NOTE(review): the assignment target looks mangled by renaming — upstream diffusers
# tests set `torch.backends.cuda.matmul.allow_tf32 = False` at this point; confirm
# against the original file before relying on this flag.
SCREAMING_SNAKE_CASE_ = False
class a ( unittest.TestCase ):
    """Placeholder for fast (CPU) tests of VersatileDiffusionTextToImagePipeline; none implemented yet."""
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for VersatileDiffusion text-to-image.

    Local variable names restored: the mangled version bound every result to the
    same name, leaving `pipe`, `prompt`, `generator`, etc. undefined at use sites.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Re-seed so the reloaded pipeline sees the same noise.
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 579
| 1
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """Return True if `string` contains no repeated letters, case-insensitively.

    Raises:
        ValueError: if the string contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("""String must only contain alphabetic characters.""")

    # Compare the case-folded letters; the previous version lowercased into a
    # throwaway variable and then compared the raw input, so 'Aa' wrongly passed.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


# Backward-compat alias for the previous (mangled) function name.
__lowercase = is_isogram
if __name__ == "__main__":
    # Read a word from stdin and report whether it is an isogram.
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
    print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 582
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """Builds tiny ConvNext configs and dummy inputs for the model tests below.

    Names restored to the standard tester contract: the mangled version lost
    every `self.*` assignment and method name (the call sites in
    ConvNextModelTest and ConvNextBackboneTest use these names).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ConvNext model tests. Method and attribute names restored to the standard
    ModelTesterMixin contract (the mangled version named every method identically)."""

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published convnext-tiny checkpoint."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Backbone-specific checks, driven by BackboneTesterMixin (the mangled base
    name `lowerCAmelCase_` was undefined; the import at the top of the file shows
    BackboneTesterMixin is the intended mixin)."""

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 270
| 0
|
import warnings
from .generation import TFGenerationMixin
class A ( TFGenerationMixin ):
    """Deprecated re-export shim for ``TFGenerationMixin``.

    Importing this module emits the deprecation warning once, at
    class-creation time.
    """

    # BUG FIX: the base class and the warning category were both the undefined
    # mangled alias `UpperCamelCase_` (not a Warning subclass, so the warn call
    # would raise TypeError at import).  Use the imported ``TFGenerationMixin``
    # base and a proper ``FutureWarning`` category; message text is unchanged.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
| 712
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
    """Return the n-th Fibonacci number (1-indexed: F(1)=0, F(2)=1).

    Non-integer input (and n == 1) returns 0, mirroring the original guard.
    """
    # BUG FIX: the guard compared an undefined `n` and called
    # `isinstance(x, x)` (TypeError); the sequence list was bound to a
    # different name than the one appended to.  Restored consistent names and
    # an `int` type check.
    if snake_case__ == 1 or not isinstance(snake_case__ , int ):
        return 0
    elif snake_case__ == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , snake_case__ + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[snake_case__]
def __lowerCAmelCase ( snake_case__ ):
    """Return the index of the first Fibonacci number with ``snake_case__`` digits."""
    # BUG FIX: both counters were bound to one mangled throwaway name while the
    # loop read `digits`/`index`, and the Fibonacci helper was called with the
    # digit target instead of the running index.
    # NOTE(review): `fibonacci` must resolve to the helper above (renamed in
    # this file) — verify the wiring.
    digits = 0
    index = 2
    while digits < snake_case__:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def __lowerCAmelCase ( snake_case__ = 1_000 ):
    # Project Euler 25 entry point: index of the first Fibonacci number with
    # `snake_case__` digits.  NOTE(review): `fibonacci_digits_index` is not
    # defined under that name in this file (it was renamed) — verify wiring.
    return fibonacci_digits_index(snake_case__ )
if __name__ == "__main__":
    # Read the target digit count from stdin and print the answer.
    # NOTE(review): `solution` is not defined under that name here (renamed).
    print(solution(int(str(input()).strip())))
| 399
| 0
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Demo: fuzzy-set operations on two triangular membership functions,
    # plotted in a 4x3 grid.
    # NOTE(review): every assignment target in this script was renamed to `A_`
    # by the file-wide mangle, while later statements read the original names
    # (`X`, `young`, `middle_aged`, `abca`, `one`, `zero`, `union`, ...).  As
    # written it raises NameError; the intended variable names must be
    # restored before use.  Also both `trimf` calls use the same parameter
    # list, so the two sets would be identical.
    # Create universe of discourse in Python using linspace ()
    A_ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    A_ = [0, 25, 50]
    A_ = [25, 50, 75]
    A_ = fuzz.membership.trimf(X, abca)
    A_ = fuzz.membership.trimf(X, abca)
    # Compute the different operations using inbuilt functions.
    A_ = np.ones(75)
    A_ = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    A_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    A_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    A_ = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    A_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    A_ = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    A_ = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    A_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    A_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt
    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 42
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Maps each submodule to the public names it defines; ``_LazyModule`` uses this
# to defer the heavy framework imports until an attribute is first accessed.
# BUG FIX: every assignment in this section previously targeted the same name
# ``A_`` (each backend list overwrote the import map), and the final
# ``_LazyModule`` call read an undefined ``_import_structure``.  Restored the
# canonical lazy-init structure; exported names are unchanged.
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports, guarded per backend.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy over the structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 42
| 1
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ : Tuple = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
SCREAMING_SNAKE_CASE__ : Any = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 
255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
SCREAMING_SNAKE_CASE__ : Optional[int] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def a ( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    """Compute per-class intersect/union/pred/label pixel areas for one prediction.

    Args:
        pred_label: predicted segmentation map, shape (height, width).
        label: ground-truth segmentation map, same shape.
        num_labels: number of classes.
        ignore_index: label value to exclude from the statistics.
        label_map: optional dict mapping old label ids to new ones (applied to
            the ground truth in place).
        reduce_labels: if True, shift all labels down by 1 and map background
            (0) to 255.

    Returns:
        Tuple of four (num_labels,) arrays:
        (area_intersect, area_union, area_pred_label, area_label).
    """
    # BUG FIX: every parameter previously shared a single mangled name (a
    # syntax error) and all intermediates were bound to one throwaway name;
    # restored the canonical variable flow.
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # Keep only pixels whose ground truth is not the ignore label.
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    # Pixels predicted correctly, bucketed per class.
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def a ( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    """Accumulate intersect/union/pred/label areas over a batch of segmentation maps.

    Returns the four (num_labels,) float64 totals in the same order as the
    per-image helper.
    """
    # BUG FIX: all parameters previously shared one mangled name (a syntax
    # error) and the four accumulators were bound to a single throwaway name.
    # NOTE(review): `intersect_and_union` must resolve to the per-image helper
    # above (renamed to `a` in this file) — verify the wiring.
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def a ( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    """Compute mean IoU metrics over a batch of predicted/ground-truth maps.

    Returns a dict with mean_iou, mean_accuracy, overall_accuracy and the
    per-category IoU/accuracy arrays; NaNs are replaced when ``nan_to_num``
    is given.
    """
    # BUG FIX: all parameters previously shared one mangled name (a syntax
    # error) and every metric was assigned to a single throwaway name instead
    # of the result dict keys.
    # NOTE(review): `total_intersect_and_union` must resolve to the batch
    # helper above (renamed to `a` in this file) — verify the wiring.
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
    """Mean-IoU metric wrapper for the `datasets` library.

    NOTE(review): `_DESCRIPTION`, `_CITATION` and `_KWARGS_DESCRIPTION` are not
    defined under those names in this file (the module constants were renamed);
    both methods below share the name `_lowercase`, so the second shadows the
    first; and the second method's signature reuses one mangled parameter name
    while the body reads `lowerCAmelCase_` — all rename damage to verify.
    """
    def _lowercase ( self ) -> Tuple:
        # Metric metadata: both inputs are 2-D uint16 segmentation maps.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
                } ) , reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ] , )
    def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> List[str]:
        # Delegates to the module-level mean_iou implementation.
        snake_case__ =mean_iou(
            results=lowerCAmelCase_ , gt_seg_maps=lowerCAmelCase_ , num_labels=lowerCAmelCase_ , ignore_index=lowerCAmelCase_ , nan_to_num=lowerCAmelCase_ , label_map=lowerCAmelCase_ , reduce_labels=lowerCAmelCase_ , )
        return iou_result
| 712
|
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def a ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint into a PyTorch checkpoint.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: directory to save the converted PyTorch model.
    """
    # BUG FIX: the three parameters previously shared one mangled name (a
    # syntax error) and the body read the original, undefined names.
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # CLI: convert a TensorFlow T5 checkpoint into a PyTorch checkpoint.
    # NOTE(review): the parser/args bindings below were renamed to
    # `SCREAMING_SNAKE_CASE__` while later lines read `parser`/`args`, and
    # `convert_tf_checkpoint_to_pytorch` is not defined under that name in
    # this file (the converter was renamed to `a`) — verify the wiring.
    SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 581
| 0
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__lowerCAmelCase : Optional[Any] = {"UserAgent": UserAgent().random}
def UpperCAmelCase_ ( script ) -> dict:
    """Extract the user profile dict from an Instagram profile-page <script> tag.

    Args:
        script: a BeautifulSoup tag whose first content string holds the
            ``window._sharedData`` JSON payload.
    """
    # BUG FIX: the tag content and the parsed JSON were bound to a mangled
    # throwaway name while the body read undefined `script`/`data`/`info`;
    # restored consistent names.
    data = script.contents[0]
    # Slice from the start of the JSON object to just before the trailing ';'.
    info = json.loads(data[data.find('''{"config"''' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __lowerCAmelCase :
    """Scrapes public profile data for one Instagram username.

    NOTE(review): this class suffered file-wide rename damage — `__init__`
    binds locals instead of `self.url`/`self.user_data`, reads undefined
    `username`/`HEADERS`, and every property/method shares the name
    `snake_case_`, so only the last definition of each survives.  The original
    distinct names (get_json, username, fullname, biography, email, website,
    number_of_followers, ...) must be restored for the class to work.
    """
    def __init__( self : List[str] , _snake_case : List[str] ):
        __lowercase : Tuple = F'https://www.instagram.com/{username}/'
        __lowercase : Any = self.get_json()
    def snake_case_ ( self : Optional[int] ):
        # Fetch the profile page and parse the profile JSON out of a <script>
        # tag; Instagram serves it at index 4 (or 3 for some layouts).
        __lowercase : Optional[Any] = requests.get(self.url , headers=_snake_case ).text
        __lowercase : Optional[Any] = BeautifulSoup(_snake_case , '''html.parser''' ).find_all('''script''' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
    def __repr__( self : Union[str, Any] ):
        return F'{self.__class__.__name__}(\'{self.username}\')'
    def __str__( self : Optional[Any] ):
        return F'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def snake_case_ ( self : Union[str, Any] ):
        return self.user_data["username"]
    @property
    def snake_case_ ( self : Tuple ):
        return self.user_data["full_name"]
    @property
    def snake_case_ ( self : int ):
        return self.user_data["biography"]
    @property
    def snake_case_ ( self : List[Any] ):
        return self.user_data["business_email"]
    @property
    def snake_case_ ( self : Dict ):
        return self.user_data["external_url"]
    @property
    def snake_case_ ( self : str ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def snake_case_ ( self : Optional[Any] ):
        return self.user_data["edge_follow"]["count"]
    @property
    def snake_case_ ( self : Union[str, Any] ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def snake_case_ ( self : Optional[int] ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def snake_case_ ( self : Any ):
        return self.user_data["is_verified"]
    @property
    def snake_case_ ( self : Optional[int] ):
        return self.user_data["is_private"]
def UpperCAmelCase_ ( __lowerCAmelCase = "github" ) -> None:
    """Live-network smoke test of the scraper against a real profile.

    NOTE(review): rename damage — the instance is bound to a local throwaway
    name while the asserts read undefined `instagram_user`, and `InstagramUser`
    is not defined under that name in this file; the `isinstance` second
    argument should presumably be `dict`.  Verify before relying on this test.
    """
    import os
    if os.environ.get('''CI''' ):
        return # test failing on GitHub Actions
    __lowercase : Dict = InstagramUser(__lowerCAmelCase )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , __lowerCAmelCase )
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    # Run doctests, then print a live profile summary.
    # NOTE(review): the instance is bound to `__lowerCAmelCase` while the
    # prints read undefined `instagram_user`, and `InstagramUser` is not
    # defined under that name in this file (rename damage) — verify.
    import doctest
    doctest.testmod()
    __lowerCAmelCase : Optional[Any] = InstagramUser("github")
    print(instagram_user)
    print(F'{instagram_user.number_of_posts = }')
    print(F'{instagram_user.number_of_followers = }')
    print(F'{instagram_user.number_of_followings = }')
    print(F'{instagram_user.email = }')
    print(F'{instagram_user.website = }')
    print(F'{instagram_user.profile_picture_url = }')
    print(F'{instagram_user.is_verified = }')
    print(F'{instagram_user.is_private = }')
| 509
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): every constant below was renamed to `__lowerCAmelCase` by the
# file-wide mangle, so each assignment shadows the previous one, while the
# tokenizer code reads the original names (`logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `BPE_TOKEN_MERGES` and the "@@ " split token).
# Restore those names for the module to work.
__lowerCAmelCase : Any = logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
__lowerCAmelCase : Optional[Any] = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
# Download URLs for the pretrained s2t-wav2vec2 checkpoint files.
__lowerCAmelCase : List[str] = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}
# End-of-word marker used in the BPE merges, and the sub-word split token.
__lowerCAmelCase : Union[str, Any] = "</w>"
__lowerCAmelCase : str = "@@ "
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Tuple:
__lowercase : List[str] = set()
__lowercase : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase : Tuple = char
return pairs
# Speech2Text2 has no max input length
# NOTE(review): this should be the `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`
# mapping read by the tokenizer class below; the name was mangled.
__lowerCAmelCase : Optional[Any] = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Speech2Text2 BPE tokenizer.

    Decoding only requires ``vocab.json``; encoding additionally requires a
    ``merges.txt`` file (otherwise ``_tokenize`` raises).
    """

    # BUG FIX: the previous version did not parse — every ``__init__``
    # parameter shared one mangled name, and every method/class attribute was
    # collapsed onto a single identifier so later definitions shadowed earlier
    # ones and none of the ``PreTrainedTokenizer`` hooks were overridden.
    # Restored the canonical attribute/method/parameter names; all runtime
    # strings are unchanged.
    # NOTE(review): ``BPE_TOKEN_VOCAB`` is the "@@ " split token whose module
    # constant was renamed in this file — confirm its intended name.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(F'No merges files provided. {self.__class__.__name__} can only be used for decoding.' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='''utf-8''' ) as merges_handle:
                merges = merges_handle.read().split('''\n''' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        # Size of the id -> token table.
        return len(self.decoder )

    def get_vocab(self) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe(self, token):
        """Apply byte-pair merges to ``token``; results are memoized in ``self.cache``."""
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked bigram until no known merge remains.
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '''''' )
        word = word.replace(''' ''' , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens

    def _convert_token_to_id(self, token: str):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index: int):
        result = self.decoder.get(index , self.unk_token )
        return result

    def convert_tokens_to_string(self, tokens):
        string = ''' '''.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''''''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 509
| 1
|
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : bytes ):
    """Base16-encode ``UpperCAmelCase_`` (bytes) into an uppercase hex string."""
    # BUG FIX: ``hex()`` was applied to the whole input instead of each byte,
    # which raises TypeError for bytes input.
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(UpperCAmelCase_ )] )
def _snake_case ( data: str ):
    """Decode an RFC 3548 base16 (uppercase hex) string back into bytes.

    Raises:
        ValueError: if the length is odd, or a character is outside 0-9A-F.
    """
    # BUG FIX: the body indexed an undefined ``data`` while the parameter had
    # been mangled to a different name; restored the name the body expects.
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 500
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
    """Deprecated alias for the VideoMAE image processor, kept for backward compatibility."""

    def __init__( self: Tuple , *args , **kwargs ):
        """Warn about the deprecation, then delegate construction to the parent."""
        # BUG FIX: both var-arg parameters previously shared one mangled name
        # (a syntax error) and the warn category was that same parameter name;
        # restored distinct names and a proper FutureWarning category.
        warnings.warn(
            """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use VideoMAEImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 500
| 1
|
"""simple docstring"""
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]: seive[k] is True when no factor
# has marked k (indices 0 and 1 are left True and are never queried below).
# BUG FIX: every assignment here previously targeted the mangled name `a_`,
# so composites were never marked and the `seive` list read by the helpers
# below was never defined.
seive = [True] * 1_0_0_0_0_0_1
i = 2
while i * i <= 1_0_0_0_0_0_0:
    if seive[i]:
        for j in range(i * i, 1_0_0_0_0_0_1, i):
            seive[j] = False
    i += 1
def __UpperCAmelCase ( n ):
    """Look up primality of ``n`` in the precomputed module-level ``seive``."""
    # BUG FIX: the body read ``n`` while the parameter had been mangled to a
    # different name; restored the name the body expects.
    return seive[n]
def __UpperCAmelCase ( __UpperCamelCase ):
    """Return True when the decimal form of the argument contains an even digit."""
    # Equivalent to checking each digit against "02468": the number contains an
    # even digit exactly when its digit set intersects that alphabet.
    return not set(str(__UpperCamelCase )).isdisjoint('''02468''' )
def __UpperCAmelCase ( __UpperCamelCase = 1_00_00_00 ):
    """Return all circular primes below ``__UpperCamelCase`` (Project Euler 35).

    A circular prime stays prime under every rotation of its digits.
    """
    # BUG FIX: the result list and the rotation temporaries were bound to one
    # mangled throwaway name while the body read `result`/`str_num`/
    # `list_nums`, and the rotation check called `is_prime` with the limit
    # instead of each rotation.
    # NOTE(review): `is_prime` / `contains_an_even_digit` must resolve to the
    # helpers above (renamed in this file) — verify the wiring.
    result = [2]  # result already includes the number 2.
    for num in range(3 , __UpperCamelCase + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def __UpperCAmelCase ( ):
    # Project Euler 35 entry point: count of circular primes below one million.
    # NOTE(review): `find_circular_primes` is not defined under that name in
    # this file (it was renamed) — verify wiring.
    return len(find_circular_primes() )
if __name__ == "__main__":
    # Print the circular-prime count when run as a script.
    print(F"{len(find_circular_primes()) = }")
| 76
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of pretrained config download URLs.
a_ : Dict = logging.get_logger(__name__)
a_ : Union[str, Any] = {
    'edbeeching/decision-transformer-gym-hopper-medium': (
        'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case ( PretrainedConfig ):
    """Configuration for the Decision Transformer model.

    Stores the RL dimensions (state/action), the GPT-2-style transformer
    hyper-parameters, and the standard generation token ids.
    """

    # BUG FIX: the previous version did not parse — every ``__init__``
    # parameter was named ``a`` (duplicate argument names), the body assigned
    # all values to a throwaway name instead of ``self.*`` attributes, the
    # three class attributes shared one mangled name so only the last
    # survived, and the base class alias was undefined.  Restored the
    # canonical names; defaults and ordering are unchanged.
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ) -> None:
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
| 73
| 0
|
def a__ ( snake_case ):
    """Drop repeated alphabetic characters from a key, keeping spaces as-is."""
    # BUG FIX: the accumulator was bound to a mangled throwaway name while the
    # loop read and returned `key_no_dups`; restored a consistent name.
    # NOTE: operator precedence means a space is always appended, even when
    # repeated — this mirrors the original condition.
    key_no_dups = ''''''
    for ch in snake_case:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def a__ ( snake_case ):
    """Build a substitution-cipher alphabet keyed by ``snake_case``.

    The deduplicated, uppercased key fills the first mappings; the remaining
    plaintext letters map to alphabet letters shifted back by the key length,
    skipping letters already used by the key.
    """
    # BUG FIX: `key`, `offset`, `char` and the cipher dict were all bound to a
    # single mangled throwaway name while the body read the original names;
    # restored the consistent variable flow.
    # NOTE(review): `remove_duplicates` must resolve to the helper above
    # (renamed in this file) — verify the wiring.
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(snake_case.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def a__ ( message , cipher_map ):
    """Encipher ``message`` with ``cipher_map``; unmapped characters pass through."""
    # BUG FIX: both parameters previously shared one mangled name (a syntax
    # error); restored distinct names matching how the body uses them.
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def a__ ( message , cipher_map ):
    """Decipher ``message`` by inverting ``cipher_map``; unmapped characters pass through."""
    # BUG FIX: both parameters previously shared one mangled name (a syntax
    # error); restored distinct names matching how the body uses them.
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def a__ ( ):
    """Interactive entry point: prompt for message/key/mode and print the result.

    NOTE(review): rename damage — the prompts are bound to throwaway names
    while the dispatch reads undefined `option`/`snake_case`, and the final
    call passes `snake_case` twice; the originals were the message/key/mode
    inputs. Verify before use.
    """
    __SCREAMING_SNAKE_CASE : Any = input('''Enter message to encode or decode: ''' ).strip()
    __SCREAMING_SNAKE_CASE : Tuple = input('''Enter keyword: ''' ).strip()
    __SCREAMING_SNAKE_CASE : str = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        __SCREAMING_SNAKE_CASE : List[str] = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    __SCREAMING_SNAKE_CASE : int = create_cipher_map(snake_case )
    print(func(snake_case , snake_case ) )
if __name__ == "__main__":
    # Run doctests, then the interactive cipher loop.
    # NOTE(review): `main` is not defined under that name in this file (the
    # entry point was renamed) — verify wiring.
    import doctest
    doctest.testmod()
    main()
| 131
|
from __future__ import annotations
def a__ ( snake_case ):
    """Return True when the string form of ``snake_case`` reads the same reversed."""
    # BUG FIX: the string form was bound to a mangled throwaway name while the
    # comparison read an undefined ``n``; restored a consistent local.
    text = str(snake_case )
    return text == text[::-1]
def a__ ( snake_case = 1_000_000 ):
    """Sum numbers below ``snake_case`` that are palindromic in base 10 and base 2.

    (Project Euler 36.)
    """
    # BUG FIX: the accumulator was bound to a mangled throwaway name while the
    # loop updated `total`, and both palindrome checks were called with the
    # limit instead of the loop variable `i`.
    # NOTE(review): `is_palindrome` must resolve to the helper above (renamed
    # in this file) — verify the wiring.
    total = 0
    for i in range(1 , snake_case ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('''b''' )[1] ):
            total += i
    return total
if __name__ == "__main__":
    # Read the limit from stdin and print the double-base palindrome sum.
    print(solution(int(str(input().strip()))))
| 131
| 1
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework for `return_tensors` in these tests: prefer
# PyTorch, then TensorFlow, falling back to JAX.
# NOTE(review): later test code reads a constant named `FRAMEWORK`; this
# constant appears to have been renamed to `__A` — confirm/restore.
if is_torch_available():
    __A = "pt"
elif is_tf_available():
    __A = "tf"
else:
    __A = "jax"
class _A ( UpperCamelCase , unittest.TestCase ):
    """Test suite for the byte-level ByT5 tokenizer.

    Mixes the shared tokenizer checks (via ``UpperCamelCase``, presumably the
    common ``TokenizerTesterMixin``) with ByT5-specific assertions.
    NOTE(review): local names in this class are machine-mangled — every
    assignment targets ``__UpperCAmelCase`` while later statements read the
    original names (``tokenizer``, ``toks``, ``batch`` …), and several
    ``def`` lines repeat the parameter name ``__SCREAMING_SNAKE_CASE``
    (a SyntaxError).  Restore the original identifiers before running.
    """
    # Class under test and a flag consumed by the common tester mixin.
    lowerCamelCase : List[str] = ByTaTokenizer
    lowerCamelCase : Union[str, Any] = False
    # Fixture: build a fresh tokenizer and serialize it for reload-based tests.
    def _a ( self : Dict ) -> List[str]:
        super().setUp()
        __UpperCAmelCase =ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    # Canonical pretrained ByT5 tokenizer, fetched once per test class.
    @cached_property
    def _a ( self : int ) -> Optional[int]:
        return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
    # Reload the tokenizer saved in setUp, forwarding any kwargs.
    def _a ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    # Build a clean (text, ids) pair of individually decodable byte ids.
    def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Optional[int]=20 , __SCREAMING_SNAKE_CASE : Union[str, Any]=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        __UpperCAmelCase =[]
        for i in range(len(__SCREAMING_SNAKE_CASE ) ):
            try:
                __UpperCAmelCase =tokenizer.decode([i] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        # Keep only ascii-letter tokens that round-trip through encode().
        __UpperCAmelCase =list(filter(lambda __SCREAMING_SNAKE_CASE : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __SCREAMING_SNAKE_CASE ) )
        __UpperCAmelCase =list(filter(lambda __SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) )
        if max_length is not None and len(__SCREAMING_SNAKE_CASE ) > max_length:
            __UpperCAmelCase =toks[:max_length]
        if min_length is not None and len(__SCREAMING_SNAKE_CASE ) < min_length and len(__SCREAMING_SNAKE_CASE ) > 0:
            while len(__SCREAMING_SNAKE_CASE ) < min_length:
                __UpperCAmelCase =toks + toks
        # toks_str = [t[1] for t in toks]
        __UpperCAmelCase =[t[0] for t in toks]
        # Ensure consistency
        __UpperCAmelCase =tokenizer.decode(__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
        if " " not in output_txt and len(__SCREAMING_SNAKE_CASE ) > 1:
            __UpperCAmelCase =(
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
            )
        if with_prefix_space:
            __UpperCAmelCase =""" """ + output_txt
        __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
        return output_txt, output_ids
    # Appending </s> manually must equal letting the tokenizer add eos itself.
    def _a ( self : str ) -> Optional[int]:
        __UpperCAmelCase =self.ta_base_tokenizer
        __UpperCAmelCase =tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
        __UpperCAmelCase =tokenizer(["""hi""", """I went to the gym""", """"""] )
        self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
    # Multibyte characters round-trip through encode/decode.
    def _a ( self : Union[str, Any] ) -> str:
        __UpperCAmelCase =self.ta_base_tokenizer
        __UpperCAmelCase ="""Unicode €."""
        __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["""input_ids"""] , __SCREAMING_SNAKE_CASE )
        # decoding
        __UpperCAmelCase =tokenizer.decode(__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , """Unicode €.</s>""" )
        __UpperCAmelCase =tokenizer("""e è é ê ë""" )
        __UpperCAmelCase =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["""input_ids"""] , __SCREAMING_SNAKE_CASE )
        # decoding
        __UpperCAmelCase =tokenizer.decode(__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , """e è é ê ë</s>""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
    # Padded batch returns framework tensors of the expected (2, 37) shape.
    def _a ( self : Any ) -> Union[str, Any]:
        __UpperCAmelCase =self.ta_base_tokenizer
        __UpperCAmelCase =["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        __UpperCAmelCase =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        # NOTE(review): `FRAMEWORK` is undefined — the module-level framework
        # constant above was renamed to `__A`; confirm.
        if FRAMEWORK != "jax":
            __UpperCAmelCase =list(batch.input_ids.numpy()[0] )
        else:
            __UpperCAmelCase =list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    # Encoder-only call must not produce decoder inputs.
    def _a ( self : int ) -> List[Any]:
        __UpperCAmelCase =self.ta_base_tokenizer
        __UpperCAmelCase =["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""" , __SCREAMING_SNAKE_CASE )
        self.assertIn("""attention_mask""" , __SCREAMING_SNAKE_CASE )
        self.assertNotIn("""decoder_input_ids""" , __SCREAMING_SNAKE_CASE )
        self.assertNotIn("""decoder_attention_mask""" , __SCREAMING_SNAKE_CASE )
    # Target texts are padded/truncated to the requested max_length.
    def _a ( self : Optional[Any] ) -> Optional[Any]:
        __UpperCAmelCase =self.ta_base_tokenizer
        __UpperCAmelCase =[
            """Summary of the text.""",
            """Another summary.""",
        ]
        __UpperCAmelCase =tokenizer(
            text_target=__SCREAMING_SNAKE_CASE , max_length=32 , padding="""max_length""" , truncation=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
        self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    # text vs text_target populate `input_ids` and `labels` respectively.
    def _a ( self : int ) -> Tuple:
        __UpperCAmelCase =self.ta_base_tokenizer
        __UpperCAmelCase =["""A long paragraph for summarization. </s>"""]
        __UpperCAmelCase =["""Summary of the text. </s>"""]
        # fmt: off
        __UpperCAmelCase =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        __UpperCAmelCase =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        __UpperCAmelCase =tokenizer(__SCREAMING_SNAKE_CASE , text_target=__SCREAMING_SNAKE_CASE )
        self.assertEqual(__SCREAMING_SNAKE_CASE , batch["""input_ids"""][0] )
        self.assertEqual(__SCREAMING_SNAKE_CASE , batch["""labels"""][0] )
    # Round-trip save_pretrained/from_pretrained, with and without added tokens.
    def _a ( self : Union[str, Any] ) -> List[str]:
        # safety check on max_len default value so we are sure the test works
        __UpperCAmelCase =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        __UpperCAmelCase =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __UpperCAmelCase =tempfile.mkdtemp()
                __UpperCAmelCase =""" He is very happy, UNwant\u00E9d,running"""
                __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
                __UpperCAmelCase =tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
                __UpperCAmelCase =after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                shutil.rmtree(__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase =self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                __UpperCAmelCase =tempfile.mkdtemp()
                __UpperCAmelCase =""" He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                __UpperCAmelCase =tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                __UpperCAmelCase =tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
                __UpperCAmelCase =tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE )
                __UpperCAmelCase =after_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
                self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                __UpperCAmelCase =tokenizer.__class__.from_pretrained(__SCREAMING_SNAKE_CASE , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(__SCREAMING_SNAKE_CASE )
    # additional_special_tokens from the saved json files are honoured and can
    # be overridden via from_pretrained kwargs.
    def _a ( self : int ) -> Any:
        __UpperCAmelCase =[]
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__SCREAMING_SNAKE_CASE )
                with open(os.path.join(__SCREAMING_SNAKE_CASE , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    __UpperCAmelCase =json.load(__SCREAMING_SNAKE_CASE )
                with open(os.path.join(__SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    __UpperCAmelCase =json.load(__SCREAMING_SNAKE_CASE )
                __UpperCAmelCase =[f'''<extra_id_{i}>''' for i in range(125 )]
                __UpperCAmelCase =added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                __UpperCAmelCase =added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(__SCREAMING_SNAKE_CASE , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                with open(os.path.join(__SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __UpperCAmelCase =tokenizer_class.from_pretrained(
                    __SCREAMING_SNAKE_CASE , )
                self.assertIn(
                    """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __UpperCAmelCase =added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__SCREAMING_SNAKE_CASE )]
                __UpperCAmelCase =tokenizer_class.from_pretrained(
                    __SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , )
                self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
    # Decoding the out-of-range byte id 255 yields the empty string.
    def _a ( self : str ) -> List[Any]:
        __UpperCAmelCase =[]
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__SCREAMING_SNAKE_CASE )
                __UpperCAmelCase =tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE )
                self.assertTrue(tokenizer.decode([255] ) == """""" )
    # The following common-mixin tests do not apply to a byte-level tokenizer
    # and are deliberately disabled with empty overrides.
    def _a ( self : Union[str, Any] ) -> Optional[Any]:
        pass
    def _a ( self : Optional[Any] ) -> List[Any]:
        pass
    def _a ( self : Optional[Any] ) -> int:
        pass
    def _a ( self : List[Any] ) -> int:
        pass
    # convert_tokens_to_string returns a plain str for byte tokens.
    def _a ( self : Any ) -> Optional[int]:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        __UpperCAmelCase =self.get_tokenizers(fast=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase =["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
                __UpperCAmelCase =tokenizer.convert_tokens_to_string(__SCREAMING_SNAKE_CASE )
                self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    # Special-token id/str setters stay consistent in both directions.
    def _a ( self : List[Any] ) -> Any:
        __UpperCAmelCase =self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                __UpperCAmelCase =[
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                __UpperCAmelCase =0
                __UpperCAmelCase =tokenizer.convert_ids_to_tokens(
                    __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
                for attr in attributes_list:
                    setattr(__SCREAMING_SNAKE_CASE , attr + """_id""" , __SCREAMING_SNAKE_CASE )
                    self.assertEqual(getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
                    self.assertEqual(getattr(__SCREAMING_SNAKE_CASE , attr + """_id""" ) , __SCREAMING_SNAKE_CASE )
                    setattr(__SCREAMING_SNAKE_CASE , attr + """_id""" , __SCREAMING_SNAKE_CASE )
                    self.assertEqual(getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
                    self.assertEqual(getattr(__SCREAMING_SNAKE_CASE , attr + """_id""" ) , __SCREAMING_SNAKE_CASE )
                setattr(__SCREAMING_SNAKE_CASE , """additional_special_tokens_ids""" , [] )
                self.assertListEqual(getattr(__SCREAMING_SNAKE_CASE , """additional_special_tokens""" ) , [] )
                self.assertListEqual(getattr(__SCREAMING_SNAKE_CASE , """additional_special_tokens_ids""" ) , [] )
                setattr(__SCREAMING_SNAKE_CASE , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(__SCREAMING_SNAKE_CASE , """additional_special_tokens""" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(__SCREAMING_SNAKE_CASE , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 68
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__A = TypeVar("T")
def lowercase__ ( A_: int ) -> int:
    """Return the heap-array index of the parent of the node at index *A_*.

    Bug fix: the body referenced the undefined name ``position``; the
    parameter was renamed to ``A_`` without updating the body.
    """
    return (A_ - 1) // 2
def lowercase__ ( A_: int ) -> int:
    """Return the heap-array index of the left child of the node at index *A_*.

    Bug fix: the body referenced the undefined name ``position``; the
    parameter was renamed to ``A_`` without updating the body.
    """
    return (2 * A_) + 1
def lowercase__ ( A_: int ) -> int:
    """Return the heap-array index of the right child of the node at index *A_*.

    Bug fix: the body referenced the undefined name ``position``; the
    parameter was renamed to ``A_`` without updating the body.
    """
    return (2 * A_) + 2
class _A ( Generic[T] ):
    """Array-backed min-priority queue keyed by integer weights.

    ``heap`` stores ``(elem, weight)`` pairs, ``position_map`` maps an
    element to its index in ``heap``, and ``elements`` counts live entries.
    NOTE(review): names in this class are machine-mangled — every assignment
    targets ``__UpperCAmelCase`` while later lines read the original names
    (``elem``, ``weight``, ``curr_pos`` …), the helper calls
    (``get_parent_position`` …) are undefined, and one ``def`` repeats a
    parameter name (a SyntaxError).  Restore the originals before use.
    """
    def __init__( self : List[str] ) -> None:
        __UpperCAmelCase =[]
        __UpperCAmelCase ={}
        __UpperCAmelCase =0
    def __len__( self : str ) -> int:
        # Number of entries currently stored.
        return self.elements
    def __repr__( self : Dict ) -> str:
        return str(self.heap )
    def _a ( self : Optional[int] ) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        __UpperCAmelCase =self.elements
        self.elements += 1
        self._bubble_up(__SCREAMING_SNAKE_CASE )
    def _a ( self : Optional[int] ) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        __UpperCAmelCase , __UpperCAmelCase =self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            __UpperCAmelCase , __UpperCAmelCase =self.heap[0]
            self._bubble_down(__SCREAMING_SNAKE_CASE )
        return elem
    def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
        # Update the weight of the given key
        __UpperCAmelCase =self.position_map[elem]
        __UpperCAmelCase =(elem, weight)
        if position > 0:
            # Re-heapify towards the root or the leaves depending on whether
            # the parent now outweighs the updated node.
            __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
            __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__SCREAMING_SNAKE_CASE )
            else:
                self._bubble_down(__SCREAMING_SNAKE_CASE )
        else:
            self._bubble_down(__SCREAMING_SNAKE_CASE )
    def _a ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        __UpperCAmelCase =self.position_map[elem]
        if curr_pos == 0:
            return None
        __UpperCAmelCase =get_parent_position(__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
        __UpperCAmelCase , __UpperCAmelCase =self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            return self._bubble_up(__SCREAMING_SNAKE_CASE )
        return None
    def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : T ) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        __UpperCAmelCase =self.position_map[elem]
        __UpperCAmelCase , __UpperCAmelCase =self.heap[curr_pos]
        __UpperCAmelCase =get_child_left_position(__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase =get_child_right_position(__SCREAMING_SNAKE_CASE )
        if child_left_position < self.elements and child_right_position < self.elements:
            __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
            __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                return self._bubble_down(__SCREAMING_SNAKE_CASE )
        if child_left_position < self.elements:
            __UpperCAmelCase , __UpperCAmelCase =self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                return self._bubble_down(__SCREAMING_SNAKE_CASE )
        else:
            return None
        if child_right_position < self.elements:
            __UpperCAmelCase , __UpperCAmelCase =self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                return self._bubble_down(__SCREAMING_SNAKE_CASE )
        return None
    def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
        # Swap the nodes at the given positions
        __UpperCAmelCase =self.heap[nodea_pos][0]
        __UpperCAmelCase =self.heap[nodea_pos][0]
        __UpperCAmelCase , __UpperCAmelCase =(
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        __UpperCAmelCase =nodea_pos
        __UpperCAmelCase =nodea_pos
class _A ( Generic[T] ):
    """Undirected weighted graph as an adjacency mapping node -> {node: weight}.

    NOTE(review): assignments were machine-mangled to ``__UpperCAmelCase`` —
    ``add_node``/``add_edge`` no longer populate ``self.connections`` the way
    the original code did; restore the original assignment targets before use.
    """
    def __init__( self : List[Any] ) -> None:
        __UpperCAmelCase ={}
        __UpperCAmelCase =0
    def __repr__( self : Tuple ) -> str:
        return str(self.connections )
    def __len__( self : str ) -> int:
        # Number of nodes added so far.
        return self.nodes
    def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            __UpperCAmelCase ={}
            self.nodes += 1
    def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(__SCREAMING_SNAKE_CASE )
        self.add_node(__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase =weight
        __UpperCAmelCase =weight
def lowercase__ ( A_: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's algorithm over the weighted graph *A_*.

    Returns ``(dist, parent)``: the tentative edge weights and the MST parent
    of each node (``None`` for the root / unreached nodes).
    NOTE(review): this body is machine-mangled — it reads names that are
    undefined here (``graph``, ``dist``, ``parent``, ``priority_queue``,
    ``node``, ``MinPriorityQueue``); the parameter was originally ``graph``
    and the helper classes above were renamed.  Restore before use.
    """
    __UpperCAmelCase ={node: maxsize for node in graph.connections}
    __UpperCAmelCase ={node: None for node in graph.connections}
    __UpperCAmelCase =MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(A_ , A_ )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    __UpperCAmelCase =priority_queue.extract_min()
    __UpperCAmelCase =0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            __UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(A_ , dist[neighbour] )
            __UpperCAmelCase =node
    # running prim's algorithm
    while not priority_queue.is_empty():
        __UpperCAmelCase =priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                __UpperCAmelCase =dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(A_ , dist[neighbour] )
                __UpperCAmelCase =node
    return dist, parent
| 68
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module-level logger for this tokenizer module.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# NOTE(review): the four module constants below were all mangled to the same
# name, so each assignment overwrites the previous one, and the class further
# down references the original names (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) which
# no longer exist — restore the original constant names.
# Serialization filenames used by save_pretrained/from_pretrained.
SCREAMING_SNAKE_CASE_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Hub download URLs of vocab/merges/tokenizer files per pretrained checkpoint.
SCREAMING_SNAKE_CASE_ = {
    '''vocab_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
        '''roberta-base-openai-detector''': (
            '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
        ),
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum input length (positional embedding size) per checkpoint.
SCREAMING_SNAKE_CASE_ = {
    '''roberta-base''': 512,
    '''roberta-large''': 512,
    '''roberta-large-mnli''': 512,
    '''distilroberta-base''': 512,
    '''roberta-base-openai-detector''': 512,
    '''roberta-large-openai-detector''': 512,
}
class a ( _SCREAMING_SNAKE_CASE ):
    """Fast (Rust-backed) RoBERTa tokenizer wrapper.

    Keeps the byte-level BPE pre-tokenizer and post-processor in sync with the
    ``add_prefix_space``/``trim_offsets`` constructor flags.
    NOTE(review): this file is machine-mangled — the class attributes below
    reference constants whose names were overwritten above, every local is
    assigned to ``_UpperCAmelCase`` while later lines read the original
    names, and ``@mask_token.setter`` refers to a property that was renamed
    to ``__A`` (a NameError at class-creation time).  Restore before use.
    """
    A__ : List[Any] = VOCAB_FILES_NAMES
    A__ : Any = PRETRAINED_VOCAB_FILES_MAP
    A__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Tuple = ["input_ids", "attention_mask"]
    A__ : List[Any] = RobertaTokenizer
    def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ) -> str:
        super().__init__(
            snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
        # Rebuild the byte-level pre-tokenizer when its serialized
        # add_prefix_space flag disagrees with the constructor argument.
        _UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
            _UpperCAmelCase = getattr(snake_case_ , pre_tok_state.pop("type" ) )
            _UpperCAmelCase = add_prefix_space
            _UpperCAmelCase = pre_tok_class(**snake_case_ )
        _UpperCAmelCase = add_prefix_space
        # Likewise patch the post-processor's add_prefix_space / trim_offsets.
        _UpperCAmelCase = "post_processor"
        _UpperCAmelCase = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
        if tokenizer_component_instance:
            _UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                _UpperCAmelCase = tuple(state["sep"] )
            if "cls" in state:
                _UpperCAmelCase = tuple(state["cls"] )
            _UpperCAmelCase = False
            if state.get("add_prefix_space" , snake_case_ ) != add_prefix_space:
                _UpperCAmelCase = add_prefix_space
                _UpperCAmelCase = True
            if state.get("trim_offsets" , snake_case_ ) != trim_offsets:
                _UpperCAmelCase = trim_offsets
                _UpperCAmelCase = True
            if changes_to_apply:
                _UpperCAmelCase = getattr(snake_case_ , state.pop("type" ) )
                _UpperCAmelCase = component_class(**snake_case_ )
                setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
    # Mask-token accessor: log an error and return None when unset.
    @property
    def __A ( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    # Setter wraps plain strings in an AddedToken before storing.
    @mask_token.setter
    def __A ( self , snake_case_ ) -> List[Any]:
        _UpperCAmelCase = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
        _UpperCAmelCase = value
    # Batched encoding: pre-tokenized input requires add_prefix_space=True.
    def __A ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
        _UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case_ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
    # Single-example encoding: same pre-tokenized input restriction.
    def __A ( self , *snake_case_ , **snake_case_ ) -> BatchEncoding:
        _UpperCAmelCase = kwargs.get("is_split_into_words" , snake_case_ )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*snake_case_ , **snake_case_ )
    # Persist the underlying BPE vocabulary/merges files to disk.
    def __A ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
        _UpperCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
        return tuple(snake_case_ )
    # RoBERTa special-token layout: <s> x </s> (</s> y </s> for pairs).
    def __A ( self , snake_case_ , snake_case_=None ) -> Tuple:
        _UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    # RoBERTa does not use token type ids: always return a zero vector.
    def __A ( self , snake_case_ , snake_case_ = None ) -> List[int]:
        _UpperCAmelCase = [self.sep_token_id]
        _UpperCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 579
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
# Parametrised over which metadata files exist on disk: DatasetInfosDict
# must pick up `dataset_size` from README.md YAML or dataset_infos.json.
@pytest.mark.parametrize(
    "files" , [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ] , )
def A__ ( A__ , A__ ) -> Tuple:
    '''Check DatasetInfosDict.from_directory against several file layouts.

    NOTE(review): the two parameters share one mangled name (a SyntaxError);
    the originals appear to be ``(files, tmp_path_factory)``, and the body
    also reads ``dataset_infos_dir``/``dataset_infos`` which the mangled
    assignments lost — restore before running.
    '''
    _UpperCAmelCase = tmp_path_factory.mktemp("dset_infos_dir" )
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md" , "w" ) as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md" , "w" ) as f:
            f.write("" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
            f.write("{\"default\": {\"dataset_size\": 42}}" )
    _UpperCAmelCase = DatasetInfosDict.from_directory(A__ )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
# DatasetInfo must round-trip through write_to_directory/from_directory for
# both an empty and a fully-populated instance.
@pytest.mark.parametrize(
    "dataset_info" , [
        DatasetInfo(),
        DatasetInfo(
            description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
    ] , )
def A__ ( A__ , A__ ) -> Optional[Any]:
    '''Write a DatasetInfo to a directory and reload it unchanged.

    NOTE(review): the two parameters share one mangled name (a SyntaxError);
    the originals appear to be ``(dataset_info, tmp_path)``, and the body
    reads ``dataset_info``/``reloaded`` which the mangled assignments lost.
    '''
    _UpperCAmelCase = str(A__ )
    dataset_info.write_to_directory(A__ )
    _UpperCAmelCase = DatasetInfo.from_directory(A__ )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(A__ , "dataset_info.json" ) )
def A__ ( ) -> Union[str, Any]:
    '''A fully-populated DatasetInfo survives the YAML-dict round-trip.

    NOTE(review): locals are machine-mangled — the body reads
    ``dataset_info``/``dataset_info_yaml_dict``/``reloaded`` which the
    ``_UpperCAmelCase`` assignments lost; restore before running.
    '''
    _UpperCAmelCase = DatasetInfo(
        description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    _UpperCAmelCase = dataset_info._to_yaml_dict()
    assert sorted(A__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    _UpperCAmelCase = yaml.safe_dump(A__ )
    _UpperCAmelCase = yaml.safe_load(A__ )
    assert dataset_info_yaml_dict == reloaded
def A__ ( ) -> int:
    '''An empty DatasetInfo serialises to an empty YAML dict.

    NOTE(review): locals are machine-mangled — the body reads
    ``dataset_info``/``dataset_info_yaml_dict`` which the ``_UpperCAmelCase``
    assignments lost; restore before running.
    '''
    _UpperCAmelCase = DatasetInfo()
    _UpperCAmelCase = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
# DatasetInfosDict must round-trip through write_to_directory/from_directory
# (only the YAML-representable fields are expected to survive).
@pytest.mark.parametrize(
    "dataset_infos_dict" , [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()} ),
        DatasetInfosDict({"my_config_name": DatasetInfo()} ),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42 ),
                "v2": DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def A__ ( A__ , A__ ) -> Dict:
    '''Write a DatasetInfosDict to a directory and reload it.

    NOTE(review): the two parameters share one mangled name (a SyntaxError);
    the originals appear to be ``(dataset_infos_dict, tmp_path)``, and the
    body reads names the mangled assignments lost — restore before running.
    '''
    _UpperCAmelCase = str(A__ )
    dataset_infos_dict.write_to_directory(A__ )
    _UpperCAmelCase = DatasetInfosDict.from_directory(A__ )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        _UpperCAmelCase = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        _UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(A__ , "README.md" ) )
| 579
| 1
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
a_ : List[Any] = 2
while True:
if is_prime(UpperCamelCase__ ):
yield num
num += 1
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : int = 200_0000 ):
"""simple docstring"""
return sum(takewhile(lambda UpperCamelCase__ : x < n , prime_generator() ) )
if __name__ == "__main__":
    # Fix: `solution` never existed in this module (the obfuscation renamed it
    # to `_SCREAMING_SNAKE_CASE`), so call the name that is actually defined.
    print(f"{_SCREAMING_SNAKE_CASE() = }")
| 442
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np
import pyarrow as pa

import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
    is_remote_filesystem,
    rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase_ : Any = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
    """BuilderConfig for Spark-backed datasets.

    Fixes: the obfuscation renamed the field to ``__magic_name__``; the builder
    below reads ``self.config.features``, so the field name must be ``features``.
    """

    # Optional explicit schema; inferred from the DataFrame when None.
    features: Optional[datasets.Features] = None
def _SCREAMING_SNAKE_CASE ( df : "pyspark.sql.DataFrame" , partition_order : List[int] , ):
    """Return a generator function yielding (example_id, row_dict) pairs, one
    Spark partition at a time, in the given partition order.

    Fixes: the original signature duplicated the parameter name (SyntaxError)
    and dropped the local bindings the body still referenced
    (``df_with_partition_id``, ``partition_df``, ``rows``, ``row_id``).
    """
    import pyspark

    def generate_fn():
        # Tag every row with its Spark partition id so rows can be pulled per partition.
        df_with_partition_id = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*" ).where(f"part_id = {partition_id}" ).drop("part_id" )
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                # Example ids are "<partition>_<row>" so they are unique and reproducible.
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SCREAMING_SNAKE_CASE ( _BaseExamplesIterable ):
    """Examples iterable over a Spark DataFrame, one shard per Spark partition.

    Fixes: the original ``__init__`` duplicated its parameter name (SyntaxError)
    and dropped the ``self.`` attribute bindings; method names are restored from
    the ``_BaseExamplesIterable`` contract (``shuffle_data_sources``,
    ``shard_data_sources``, ``n_shards``). NOTE(review): ``SparkExamplesIterable``
    and ``_generate_iterable_examples`` are this file's obfuscated
    ``SCREAMING_SNAKE_CASE`` / ``_SCREAMING_SNAKE_CASE``; the names are kept
    because the original source references them — they still need rebinding.
    """

    def __init__( self , df : "pyspark.sql.DataFrame" , partition_order=None , ):
        self.df = df
        # Default: iterate partitions in their natural order.
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )

    def __iter__( self ):
        yield from self.generate_examples_fn()

    def shuffle_data_sources( self , generator : np.random.Generator ):
        """Return a copy of this iterable visiting partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )

    def shard_data_sources( self , worker_id : int , num_workers : int ):
        """Return a copy restricted to the partitions assigned to `worker_id`."""
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )

    @property
    def n_shards( self ):
        # One shard per (selected) Spark partition.
        return len(self.partition_order )
class SCREAMING_SNAKE_CASE ( datasets.DatasetBuilder ):
    """DatasetBuilder that materializes a pyspark DataFrame into Arrow/Parquet shards.

    Fixes applied to the obfuscated original: duplicated parameter names
    (SyntaxErrors), dropped local/attribute bindings that the bodies still
    referenced, every method collapsed to the single name ``lowercase_`` (only
    the last survived), and ``uuid.uuida`` (no such attribute). Method names are
    restored from this class's own internal calls (``self._validate_cache_dir``,
    ``self._repartition_df_if_needed``, ``self._prepare_split_single``) and from
    the ``datasets.DatasetBuilder`` abstract interface (``_info``,
    ``_split_generators``, ``_prepare_split``).
    """

    # `datasets.DatasetBuilder` looks this attribute up by name.
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__( self , df : "pyspark.sql.DataFrame" , cache_dir : str = None , working_dir : str = None , **config_kwargs , ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        # The semantic hash of the DataFrame makes the cache config unique per query plan.
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )

    def _validate_cache_dir( self ):
        """Ensure `cache_dir` is reachable from every Spark worker (NFS on multi-node clusters)."""

        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , "a" )
            return [probe_file]

        if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )

    def _info( self ):
        """Dataset metadata; features come from the (optional) SparkConfig."""
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self , dl_manager : datasets.download.download_manager.DownloadManager ):
        # A Spark DataFrame maps to a single TRAIN split.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]

    def _repartition_df_if_needed( self , max_shard_size ):
        """Repartition the DataFrame so each partition's Arrow size is <= max_shard_size."""
        import pyspark

        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , "batch_bytes: long" )
            .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )

    def _prepare_split_single( self , fpath : str , file_format : str , max_shard_size : int , ):
        """Write shards on the workers; yield (task_id, (num_examples, num_bytes, num_shards, shard_lengths))."""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace("SSSSS" , f"{shard_id:05d}" ).replace("TTTTT" , f"{task_id:05d}" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    # Current shard is full: flush it and start a new one.
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace("SSSSS" , f"{shard_id:05d}" ).replace("TTTTT" , f"{task_id:05d}" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
            if working_fpath != fpath:
                # Move shards from the scratch dir next to the final output path.
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )

        stats = (
            self.df.mapInArrow(write_arrow , "task_id: long, num_examples: long, num_bytes: long" )
            .groupBy("task_id" )
            .agg(
                pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split( self , split_generator : "datasets.SplitGenerator" , file_format : str = "arrow" , max_shard_size = None , num_proc = None , **kwargs , ):
        """Drive the distributed write, then rename shards into the -SSSSS-of-NNNNN layout."""
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards." )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id : int , shard_id : int , global_shard_id : int , ):
                rename(
                    fs , fpath.replace("SSSSS" , f"{shard_id:05d}" ).replace("TTTTT" , f"{task_id:05d}" ) , fpath.replace("TTTTT-SSSSS" , f"{global_shard_id:05d}" ).replace("NNNNN" , f"{total_shards:05d}" ) , )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS" , f"{shard_id:05d}" ).replace("TTTTT" , f"{task_id:05d}" ) , fpath.replace(SUFFIX , "" ) , )

    def _get_examples_iterable_for_split( self , split_generator : "datasets.SplitGenerator" , ):
        # NOTE(review): `SparkExamplesIterable` is this file's obfuscated
        # `SCREAMING_SNAKE_CASE` iterable class above; the original name is kept
        # because the source references it — it still needs rebinding.
        return SparkExamplesIterable(self.df )
| 442
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class a ( unittest.TestCase ):
    """Config/inputs factory for the Flax BERT model tests.

    Fixes: the original ``__init__`` collapsed every parameter to the single
    name ``lowerCAmelCase`` (a SyntaxError) and dropped the ``self.`` prefix on
    all attribute assignments; both are restored from the attributes the other
    methods read. The three methods were all obfuscated to ``lowerCamelCase__``
    (only the last survived); their names are restored from the
    ``self.prepare_config_and_inputs()`` calls the bodies make.
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        seq_length=7 ,
        is_training=True ,
        use_attention_mask=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_choices=4 ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build (config, input_ids, token_type_ids, attention_mask) for a test."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same inputs, packed as (config, inputs_dict) for the common mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Inputs for decoder (cross-attention) tests, with encoder states attached."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
    """Model-level tests for the Flax BERT family, driven by FlaxModelTesterMixin.

    Fixes: the original base class ``UpperCAmelCase__`` was undefined
    (NameError at import); the imported ``FlaxModelTesterMixin`` is the only
    plausible intended base. Both class attributes were collapsed to the single
    name ``UpperCamelCase`` (the second overwrote the first); the mixin-contract
    names are restored. Test-method names are restored so unittest can discover
    them.
    """

    # NOTE(review): attribute names reconstructed from the FlaxModelTesterMixin
    # contract — confirm against the upstream test file.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): `FlaxBertModelTester` is the tester class above, which
        # the obfuscation also renamed to `a` (shadowed by this class) — this
        # reference still needs rebinding.
        self.model_tester = FlaxBertModelTester(self )

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased" )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Fixes: the original repeatedly rebound one throwaway name instead of filling
# `_import_structure` (which line L16523 then referenced, undefined), and the
# final `_LazyModule` was bound to a dead local instead of replacing this module
# in `sys.modules` — the standard transformers lazy-init pattern is restored.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Fixes: the original collapsed four distinct bindings into one reused name.
# The names restored here are the ones the rest of this file references:
# `_serve_dependencies_installed` (ServeCommand.__init__), `logger` (idem),
# `BaseModel` (pydantic fallback base) and `Body` (used in handler defaults).
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    # Serving extras missing: stub out the names so the module still imports.
    BaseModel = object

    def Body(*UpperCamelCase__, **UpperCamelCase__kwargs ):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger('transformers-cli/serving')
def _lowercase ( args ):
    """Factory for the `transformers-cli serve` subcommand: build the pipeline
    described by the parsed CLI namespace and wrap it in a ServeCommand.

    Fixes: the body always referenced `args`, but the parameter had been
    renamed; the pipeline local is restored and passed to the command.
    """
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    # NOTE(review): `ServeCommand` is this file's last `_lowerCamelCase` class
    # (renamed by obfuscation); the original name is kept because the source
    # itself references it — it still needs rebinding.
    return ServeCommand(nlp, args.host, args.port, args.workers )
class _lowerCamelCase ( snake_case_ ):
    '''Response model for the `/` endpoint: the pipeline model's config as a dict.

    NOTE(review): the base `snake_case_` is undefined in this module (presumably
    `BaseModel` before obfuscation — confirm). The double-underscore field name
    is class-mangled; ServeCommand constructs this as `ServeModelInfoResult(infos=...)`,
    so the field was presumably `infos` originally.
    '''
    __lowercase : dict
class _lowerCamelCase ( snake_case_ ):
    '''Response model for `/tokenize`.

    NOTE(review): ServeCommand constructs this as
    `ServeTokenizeResult(tokens=..., tokens_ids=...)`, so the two mangled fields
    below were presumably `tokens` and `tokens_ids` originally — confirm.
    '''
    __lowercase : List[str]
    __lowercase : Optional[List[int]]
class _lowerCamelCase ( snake_case_ ):
    '''Response model for `/detokenize`.

    NOTE(review): ServeCommand constructs this as
    `ServeDeTokenizeResult(model='', text=...)`; the single mangled field was
    presumably `text` (the `model` field may have been lost) — confirm.
    '''
    __lowercase : str
class _lowerCamelCase ( snake_case_ ):
    '''Response model for `/forward`.

    NOTE(review): ServeCommand constructs this as `ServeForwardResult(output=...)`,
    so the mangled field was presumably `output` originally — confirm.
    '''
    __lowercase : Any
class _lowerCamelCase ( BaseTransformersCLICommand ):
    """`transformers-cli serve`: expose a transformers Pipeline over a FastAPI app.

    Fixes: handler methods all shared one obfuscated name and several
    signatures duplicated the parameter name ``__lowercase`` (SyntaxErrors).
    Method names are restored from this class's own route table
    (``self.model_info``/``self.tokenize``/``self.detokenize``/``self.forward``);
    argument-parser ``type=``/response-model references (all collapsed to the
    undefined ``a_``) are restored from the defaults and the result classes the
    bodies already reference. The base class is the imported
    ``BaseTransformersCLICommand`` (original ``snake_case_`` was undefined).

    NOTE(review): ``ServeModelInfoResult``/``ServeTokenizeResult``/
    ``ServeDeTokenizeResult``/``ServeForwardResult`` are this file's pydantic
    classes, all renamed to ``_lowerCamelCase`` by obfuscation — the original
    names are kept because the source references them; they need rebinding.
    """

    @staticmethod
    def register_subcommand(parser ):
        """Register the `serve` subparser (name required by the CLI base class)."""
        serve_parser = parser.add_parser(
            'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
        serve_parser.add_argument(
            '--task' , type=str , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
        serve_parser.add_argument('--host' , type=str , default='localhost' , help='Interface the server will listen on.' )
        serve_parser.add_argument('--port' , type=int , default=8_888 , help='Port the serving will listen to.' )
        serve_parser.add_argument('--workers' , type=int , default=1 , help='Number of http workers' )
        serve_parser.add_argument('--model' , type=str , help='Model\'s name or path to stored model.' )
        serve_parser.add_argument('--config' , type=str , help='Model\'s config name or path to stored model.' )
        serve_parser.add_argument('--tokenizer' , type=str , help='Tokenizer name to use.' )
        serve_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        # `_lowercase` is the serve-command factory defined above (obfuscated name).
        serve_parser.set_defaults(func=_lowercase )

    def __init__( self , pipeline , host , port , workers ):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.' )
        else:
            logger.info(F"""Serving model over {host}:{port}""" )
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['GET'] , ),
                    APIRoute(
                        '/tokenize' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/detokenize' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/forward' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['POST'] , ),
                ] , timeout=600 , )

    def run( self ):
        # `run` here resolves to uvicorn.run imported at module level.
        run(self._app , host=self.host , port=self.port , workers=self.workers )

    def model_info( self ):
        """GET /: the model's configuration as a plain dict."""
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )

    def tokenize( self , text_input = Body(None , embed=True ) , return_ids = Body(False , embed=True ) ):
        """POST /tokenize: tokenize `text_input`, optionally also returning token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
                return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
            else:
                return ServeTokenizeResult(tokens=tokens_txt )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'model': '', 'error': str(e )} )

    def detokenize( self , tokens_ids = Body(None , embed=True ) , skip_special_tokens = Body(False , embed=True ) , cleanup_tokenization_spaces = Body(True , embed=True ) , ):
        """POST /detokenize: decode a list of token ids back to text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
            return ServeDeTokenizeResult(model='' , text=decoded_str )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'model': '', 'error': str(e )} )

    async def forward( self , inputs=Body(None , embed=True ) ):
        """POST /forward: run the pipeline on `inputs`."""
        if len(inputs ) == 0:
            # Empty request: return an empty result without touching the model.
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            output = self._pipeline(inputs )
            return ServeForwardResult(output=output )
        except Exception as e:
            raise HTTPException(500 , {'error': str(e )} )
| 365
|
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _lowerCAmelCase ( lowerCamelCase ):
    """Unit tests for `TypedSequence` type inference and coercion.

    Fixes: every method was obfuscated to the single name ``_a`` (only the last
    survived unittest discovery), the ``arr`` locals the assertions read were
    dropped, ``pa.intaa``/``np.uinta`` are nonexistent attributes (digits were
    mangled; restored to int64/int32/uint8 from each test's declared type), and
    the undefined ``a_`` references are restored (ValueError for the forbidden
    cases, the imported ``cast_to_python_objects`` as the patch side effect).
    NOTE(review): the base ``lowerCamelCase`` is undefined in this module —
    presumably ``TestCase`` before obfuscation; confirm against the imports.
    """

    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type , pa.int64() )

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError ):
            arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
        self.assertEqual(arr.type , pa.int32() )

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
        self.assertEqual(arr.type , pa.int32() )

    def test_try_incompatible_type(self):
        # try_type falls back to inference when incompatible; strings stay strings.
        arr = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
        self.assertEqual(arr.type , pa.string() )

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
        self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            arr = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
        self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
        self.assertEqual(arr.type , pa.string() )

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
        with patch(
            "datasets.arrow_writer.cast_to_python_objects" , side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": B"image_bytes"}, pil_image] , type=Image() ) )
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting" , kwargs )
            self.assertFalse(kwargs["optimize_list_casting"] )
def __lowerCamelCase ( output , expected_num_chunks ):
    """Read an Arrow IPC stream (pa.Buffer or file path) and assert chunking + contents.

    Fixes: the original duplicated the parameter name (SyntaxError) and dropped
    the `stream`/`f`/`pa_table` locals its body referenced.
    """
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """Write two examples through ArrowWriter and validate schema and chunking.

    Fixes: duplicated parameter names (SyntaxError) — restored to the
    parametrize ids — dropped locals, and the nonexistent ``pa.intaa``.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} )
        writer.write({"col_1": "bar", "col_2": 2} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    # NOTE(review): `_check_output` is this file's obfuscated `__lowerCamelCase`
    # helper above; the original name is kept because the source references it.
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __lowerCamelCase ( ):
    """Write with explicit Features and check the schema round-trips through IPC.

    Fixes: the original rebound one throwaway name for every local, leaving
    `output`, `features`, `schema`, `f` and `pa_table` undefined at their uses.
    """
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({"labels": 0} )
        writer.write({"labels": 1} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def __lowerCamelCase ( writer_batch_size ):
    """A non-hashable key must raise InvalidKeyError when check_duplicates is on.

    Fixes: dropped locals and the parameter mistakenly reused for both
    `writer_batch_size` and `check_duplicates`; the expected exception (lost as
    the undefined `a_`) is the imported InvalidKeyError.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        with pytest.raises(InvalidKeyError ):
            writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( writer_batch_size ):
    """Writing the same key twice must raise DuplicatedKeysError.

    Fixes: dropped locals, the parameter reused for `check_duplicates`, and the
    expected exception (lost as the undefined `pytest.raises` argument) is the
    imported DuplicatedKeysError.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        with pytest.raises(DuplicatedKeysError ):
            writer.write({"col_1": "foo", "col_2": 1} , key=10 )
            writer.write({"col_1": "bar", "col_2": 2} , key=10 )
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def __lowerCamelCase ( writer_batch_size ):
    """Distinct keys with check_duplicates on write cleanly and round-trip.

    Fixes: dropped locals and the parameter reused for `check_duplicates`
    (restored to True, matching the sibling duplicate-key tests).
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="split_name" , check_duplicates=True , ) as writer:
        writer.write({"col_1": "foo", "col_2": 1} , key=1 )
        writer.write({"col_1": "bar", "col_2": 2} , key=2 )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """`write_batch` (including an empty batch) produces the expected table.

    Fixes: duplicated parameter names (SyntaxError), dropped locals, and the
    nonexistent ``pa.intaa``.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
        writer.write_batch({"col_1": [], "col_2": []} )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """`write_table` produces the expected table.

    Fixes: duplicated parameter names (SyntaxError), dropped locals, and the
    nonexistent ``pa.intaa``.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields" , [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def __lowerCamelCase ( fields , writer_batch_size ):
    """`write_row` (one single-row table per call) produces the expected table.

    Fixes: duplicated parameter names (SyntaxError), dropped locals, and the
    nonexistent ``pa.intaa``.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    """Write a batch to an on-disk Arrow file and verify counts and schema."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Recursively unwrap (possibly nested) pyarrow list types and return the innermost value type."""
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive (non-list) element of a possibly nested list with `value`, in place."""
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize(
    "optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())]
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """A TypedSequence with an optimized int type should produce that dtype (int64 by default)."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Known columns get a downcast dtype; out-of-range values must fall back to int64."""
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer's stream must be closed on exit, whether the body raises or closes it itself."""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """Writing through a mock fsspec filesystem should honor storage_options and create the file."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """Round-trip two examples through ParquetWriter and read them back with pyarrow.parquet."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """When embed_local_files is set, image bytes are inlined in the Parquet file; otherwise only the path is kept."""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    buf_reader = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(buf_reader)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """ArrowWriter must coerce non-nullable fields to nullable when building its writer."""
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 657
| 0
|
"""simple docstring"""
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100: return the number of blue discs in the first arrangement
    with more than `min_total` discs where P(two blue) is exactly 1/2.

    Walks the Pell-like recurrence on the convergents; `denominator` tracks the
    total-disc parameter and the answer is (denominator + 1) // 2.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
    # Report the answer for the default threshold of 10**12 total discs.
    print(f"""{solution() = }""")
| 701
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to `precision` significant digits using the Chudnovsky algorithm.

    Each series term contributes ~14 digits. The last (possibly mis-rounded)
    digit of the Decimal result is stripped before returning.
    Note: mutates the global decimal context precision.
    """
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # The f-string below references `n`, so the digit count must be bound to that name.
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
| 283
| 0
|
"""simple docstring"""
# Multipliers to convert *into* each unit from km/h.
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

# Multipliers to convert *from* each unit back to km/h (inverses of the above).
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` from `unit_from` to `unit_to`, rounded to 3 decimals.

    Raises:
        ValueError: if either unit is not one of km/h, m/s, mph, knot.
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 273
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder stack over note tokens: embeddings + position table,
    `num_layers` T5 blocks, final layer norm and dropout."""

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        # NOTE(review): the position table is kept frozen — confirm against upstream.
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)

        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode token ids; returns (hidden_states, encoder_inputs_mask)."""
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 273
| 1
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (the original assigned both values to the same name, clobbering the logger).
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration for SegFormer models; defaults follow the b0 architecture."""

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Deprecated flag; defaults to True when absent.
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the single pixel_values input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supported by the export."""
        return 12
| 704
|
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Element-wise Gaussian of the given variance, applied to the whole array."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the kernel_size x kernel_size window of `img` centered at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Build the spatial Gaussian kernel: Gaussian of the distance to the kernel center."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving bilateral filter: each output pixel is a weighted mean of its
    window, weighted by spatial distance and intensity difference. Border pixels
    (within kernel_size // 2 of an edge) are left at zero."""
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the center pixel of the window.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def parse_args(args: list) -> tuple:
    """Parse CLI args: (filename, spatial_variance, intensity_variance, kernel_size),
    with defaults for any missing trailing arguments."""
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Force the kernel size to be odd so a unique center pixel exists.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # The original unpacked into four identical throwaway names, losing every value.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    # NOTE(review): `cva` appears to be a mangled `cv2` import — confirm at file top.
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 479
| 0
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys from `state_dict` in place (missing keys are ignored)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weight tensor with `emb` (tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M100 checkpoint and return an equivalent HF MaMaaaForConditionalGeneration."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1_024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # NOTE(review): the mangled source lost the assignment target here; restoring the
    # conventional shared-embedding key — confirm against the upstream conversion script.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    # Original had a stray 'ß' appended to the attribute name (fairseq_pathß) — fixed.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 626
|
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression with Dijkstra's two-stack algorithm.

    Digits push onto the operand stack, operators onto the operator stack, and each
    closing parenthesis pops one operator and two operands, pushing the result back.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 was pushed first, so it is the left-hand operand.
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    # The f-string below reads `equation`, so the expression must be bound to that name.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33
| 0
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Model ids and fixture texts used throughout the tests below (the mangled source
# assigned all eight to the same name, so only the last survived).
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list) -> None:
    """Write the articles newline-joined to `path` (also fixes the original's unclosed file handle)."""
    Path(path).write_text("\n".join(articles))
def make_test_data_dir(tmp_dir):
    """Populate `tmp_dir` with train/val/test source (articles) and target (summaries) files."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    """Tests for SeqaSeqDataset / LegacySeqaSeqDataset, samplers and dataset packing.

    The mangled original gave every method the same name (`A__`, so all but the
    last were shadowed and none were pytest-collectable) and used duplicate
    parameter names (a SyntaxError); names are restored from the assert bodies.
    """

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 706
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Map of user-facing model-type keys to (config class, TF model class(es),
# PyTorch model class(es), pretrained archive map(s)).  The conversion
# functions below unpack these tuples, so element order matters.
MODEL_CLASSES = {
    'bart': (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    'bert': (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'bert-large-uncased-whole-word-masking-finetuned-squad': (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'bert-large-cased-whole-word-masking-finetuned-squad': (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'bert-base-cased-finetuned-mrpc': (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'dpr': (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    'gpt2': (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'xlnet': (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'xlm': (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'xlm-roberta': (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'transfo-xl': (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'openai-gpt': (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'roberta': (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'layoutlm': (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    'roberta-large-mnli': (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'camembert': (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'flaubert': (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'distilbert': (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'distilbert-base-distilled-squad': (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'lxmert': (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'lxmert-visual-feature-encoder': (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'ctrl': (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'albert': (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    't5': (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'electra': (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'wav2vec2': (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert a single PyTorch checkpoint to a TensorFlow ``.h5`` weights file.

    Args:
        model_type: key into ``MODEL_CLASSES`` selecting config/model classes.
        pytorch_checkpoint_path: local path or shortcut name of the PT checkpoint.
        config_file: local path or shortcut name of the model config.
        tf_dump_path: output path for the converted TF weights.
        compare_with_pt_model: if True, run both models and assert outputs match.
        use_cached_models: if True, reuse previously downloaded files.

    Raises:
        ValueError: if ``model_type`` is not a key of ``MODEL_CLASSES``.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    # Expose all intermediate activations so the PT/TF comparison below is meaningful.
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the (possibly remote) PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save the TensorFlow weights
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format='h5')
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one model type (or all of them) from PyTorch to TensorFlow.

    Args:
        args_model_type: a single ``MODEL_CLASSES`` key, or None to convert every type.
        tf_dump_path: directory receiving the converted ``*-tf_model.h5`` files.
        model_shortcut_names_or_path: checkpoints to convert; defaults to every
            shortcut known for the model type.
        config_shortcut_names_or_path: matching configs; defaults to the model names.
        compare_with_pt_model: forward both models and check their outputs agree.
        use_cached_models: reuse previously downloaded files.
        remove_cached_files: delete downloaded files after each conversion.
        only_convert_finetuned_models: restrict to squad/mrpc/mnli checkpoints.

    Raises:
        ValueError: if a requested model type is unknown.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            # Fine-tuned checkpoints are recognisable by their name suffixes.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            # Local files get a generic output name instead of a path-derived one.
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = 'converted_model'

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
    )
    parser.add_argument(
        '--model_type',
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            'convert all the models from AWS.'
        ),
    )
    parser.add_argument(
        '--pytorch_checkpoint_path',
        default=None,
        type=str,
        help=(
            'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
            'If not given, will download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        help=(
            'The config json file corresponding to the pre-trained model. \n'
            'This specifies the model architecture. If not given and '
            '--pytorch_checkpoint_path is not given or is a shortcut name '
            'use the configuration associated to the shortcut name on the AWS'
        ),
    )
    parser.add_argument(
        '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
    )
    parser.add_argument(
        '--use_cached_models',
        action='store_true',
        help='Use cached models if possible instead of updating to latest checkpoint versions.',
    )
    parser.add_argument(
        '--remove_cached_files',
        action='store_true',
        help='Remove pytorch models after conversion (save memory when converting in batches).',
    )
    parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 380
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase_ ( unittest.TestCase ):
    """Integration test for the Flax XLM-RoBERTa base model."""

    @slow
    def lowerCamelCase_ ( self ):
        """Run xlm-roberta-base on a fixed sentence and check the last hidden state."""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 660
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# U-Net configuration used by the small test checkpoints.
TEST_UNET_CONFIG = {
    'sample_size': 32,
    'in_channels': 3,
    'out_channels': 3,
    'layers_per_block': 2,
    'num_class_embeds': 1000,
    'block_out_channels': [32, 64],
    'attention_head_dim': 8,
    'down_block_types': [
        'ResnetDownsampleBlock2D',
        'AttnDownBlock2D',
    ],
    'up_block_types': [
        'AttnUpBlock2D',
        'ResnetUpsampleBlock2D',
    ],
    'resnet_time_scale_shift': 'scale_shift',
    'upsample_type': 'resnet',
    'downsample_type': 'resnet',
}

# U-Net configuration for the class-conditional ImageNet-64 checkpoints.
IMAGENET_64_UNET_CONFIG = {
    'sample_size': 64,
    'in_channels': 3,
    'out_channels': 3,
    'layers_per_block': 3,
    'num_class_embeds': 1000,
    'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
    'attention_head_dim': 64,
    'down_block_types': [
        'ResnetDownsampleBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
    ],
    'up_block_types': [
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'ResnetUpsampleBlock2D',
    ],
    'resnet_time_scale_shift': 'scale_shift',
    'upsample_type': 'resnet',
    'downsample_type': 'resnet',
}

# U-Net configuration for the unconditional LSUN-256 (bedroom/cat) checkpoints.
LSUN_256_UNET_CONFIG = {
    'sample_size': 256,
    'in_channels': 3,
    'out_channels': 3,
    'layers_per_block': 2,
    'num_class_embeds': None,
    'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    'attention_head_dim': 64,
    'down_block_types': [
        'ResnetDownsampleBlock2D',
        'ResnetDownsampleBlock2D',
        'ResnetDownsampleBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
        'AttnDownBlock2D',
    ],
    'up_block_types': [
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'AttnUpBlock2D',
        'ResnetUpsampleBlock2D',
        'ResnetUpsampleBlock2D',
        'ResnetUpsampleBlock2D',
    ],
    'resnet_time_scale_shift': 'default',
    'upsample_type': 'resnet',
    'downsample_type': 'resnet',
}

# Scheduler configuration for consistency-distillation (CD) checkpoints.
CD_SCHEDULER_CONFIG = {
    'num_train_timesteps': 40,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}

# Scheduler configuration for consistency-training (CT) on ImageNet-64.
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    'num_train_timesteps': 201,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}

# Scheduler configuration for consistency-training (CT) on LSUN-256.
CT_LSUN_256_SCHEDULER_CONFIG = {
    'num_train_timesteps': 151,
    'sigma_min': 0.002,
    'sigma_max': 80.0,
}
def strabool(v):
    """Parse a boolean-like CLI value.

    Args:
        v: a bool (returned unchanged) or a string such as "yes"/"no".

    Returns:
        bool: the parsed value.

    Raises:
        argparse.ArgumentTypeError: if ``v`` is not a recognised boolean string.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one resnet block's weights from the original checkpoint layout to diffusers.

    Args:
        checkpoint: source state dict (original consistency-model naming).
        new_checkpoint: destination state dict (diffusers naming), mutated in place.
        old_prefix: key prefix of the block in ``checkpoint``.
        new_prefix: key prefix of the block in ``new_checkpoint``.
        has_skip: whether the block has a skip/shortcut convolution.

    Returns:
        The (mutated) ``new_checkpoint`` dict.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Copy one attention block's weights from the original checkpoint layout to diffusers.

    The original checkpoint stores q/k/v as one fused 1x1-conv ``qkv`` tensor; it is
    split into thirds and the trailing singleton conv dims are squeezed away to get
    linear-layer weights.

    Args:
        checkpoint: source state dict.
        new_checkpoint: destination state dict, mutated in place.
        old_prefix: key prefix of the block in ``checkpoint``.
        new_prefix: key prefix of the block in ``new_checkpoint``.
        attention_dim: unused; kept for call-site compatibility.

    Returns:
        The (mutated) ``new_checkpoint`` dict.
    """
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert a consistency-model ``unet.pt`` state dict to the diffusers UNet2D layout.

    Args:
        checkpoint_path: path to the original PyTorch checkpoint file.
        unet_config: config dict (one of the *_UNET_CONFIG dicts above) describing
            the block layout, used to walk the down/mid/up blocks in order.

    Returns:
        A new state dict keyed with diffusers parameter names.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1  # input_blocks.0 is conv_in, handled above
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # A channel change means the first resnet of the block has a skip conv.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        # Every down block except the last ends with a downsampler.
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')

    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config from the checkpoint file name
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config from the checkpoint file name (cd = distillation, ct = training)
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 660
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ : Optional[Any] = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : int = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
UpperCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 497
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module logger; used by save_vocabulary below.
logger = logging.get_logger(__name__)
UpperCamelCase_ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCamelCase_ : Optional[Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
UpperCamelCase_ : int = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
UpperCamelCase_ : Optional[int] = """▁"""
class _lowercase ( PreTrainedTokenizer ):
    """BARThez tokenizer backed by a SentencePiece BPE model.

    Uses fairseq-style special-token ids (``<s>``=0, ``<pad>``=1, ``</s>``=2,
    ``<unk>``=3) layered on top of the SentencePiece vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file: str,
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        sep_token: str = "</s>",
        cls_token: str = "<s>",
        unk_token: str = "<unk>",
        pad_token: str = "<pad>",
        mask_token: str = "<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # The mask token behaves like a normal word: it includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # fairseq reserves the first four ids for special tokens and places
        # <mask> at the end of the SentencePiece vocabulary.
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Add BARThez special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Return all-zero token type ids (BARThez does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        """Tokenize a string into SentencePiece sub-word pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token to its id, honouring the fairseq special-token ids."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces.
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Convert an id back to its token, honouring the fairseq special-token ids."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join sub-word tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        """Drop the unpicklable SentencePiece processor before pickling."""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """Restore state and rebuild the SentencePiece processor."""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the SentencePiece model into ``save_directory``.

        Returns a 1-tuple with the written file path, or None on error.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No source file to copy: serialize the in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 497
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.