code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]: __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Dict = batch_size __lowerCamelCase : int = image_size __lowerCamelCase : List[str] = patch_size __lowerCamelCase : Optional[int] = num_channels __lowerCamelCase : Any = is_training __lowerCamelCase : Dict = use_labels __lowerCamelCase : List[Any] = hidden_size __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Optional[Any] = num_attention_heads __lowerCamelCase : Dict = intermediate_size __lowerCamelCase : Union[str, Any] = hidden_act __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : str = type_sequence_label_size __lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : Optional[int] = num_patches + 1 def lowercase_ ( self ) -> Union[str, Any]: __lowerCamelCase : int = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : Optional[int] = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: __lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __lowerCamelCase : str = (self.image_size, self.image_size) __lowerCamelCase : str = (self.patch_size, self.patch_size) __lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: __lowerCamelCase : Tuple = self.type_sequence_label_size __lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __lowerCamelCase : List[str] = 1 __lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[int]: __lowerCamelCase 
: List[Any] = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : int = config_and_inputs __lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowercase_ ( self ) -> None: __lowerCamelCase : str = FlaxViTModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def lowercase_ ( self ) -> List[Any]: self.config_tester.run_common_tests() def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Optional[Any]: __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : List[str] = [*signature.parameters.keys()] __lowerCamelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self ) -> Any: __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest('JIT Enabled' ): __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): __lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowercase_ ( self ) -> List[Any]: for model_class_name in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' ) __lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
13
'''simple docstring''' import sys from collections import defaultdict class UpperCAmelCase_ : """simple docstring""" def __init__( self ) -> int: __lowerCamelCase : Any = [] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any: return self.node_position[vertex] def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: __lowerCamelCase : Optional[int] = pos def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCamelCase : str = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCamelCase : Optional[Any] = 2 * start + 1 else: __lowerCamelCase : int = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child] __lowerCamelCase , __lowerCamelCase : int = ( heap[start], positions[start], ) __lowerCamelCase , __lowerCamelCase : str = temp, tempa __lowerCamelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ ) self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: __lowerCamelCase : Any = position[index] while index != 0: __lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCamelCase : Union[str, Any] = heap[parent] __lowerCamelCase : Any = position[parent] self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ ) else: __lowerCamelCase : Tuple = val __lowerCamelCase : List[str] = temp self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) break 
__lowerCamelCase : Tuple = parent else: __lowerCamelCase : Union[str, Any] = val __lowerCamelCase : Tuple = temp self.set_position(SCREAMING_SNAKE_CASE_ , 0 ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: __lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1 for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ): self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : Any = positions[0] __lowerCamelCase : Union[str, Any] = sys.maxsize self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return temp def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str: __lowerCamelCase : List[Any] = Heap() __lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ ) __lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCamelCase : Tuple = [] for vertex in range(len(UpperCAmelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCAmelCase_ ) heap.node_position.append(UpperCAmelCase_ ) __lowerCamelCase : Tuple = [] __lowerCamelCase : Dict = 1 __lowerCamelCase : str = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCamelCase : Any = 0 __lowerCamelCase : Any = distance heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ ) for _ in range(1 , len(UpperCAmelCase_ ) ): __lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCamelCase : Union[str, Any] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and 
distance < distance_tv[heap.get_position(UpperCAmelCase_ )] ): __lowerCamelCase : Dict = distance heap.bottom_to_top( UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ ) __lowerCamelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > A__ : Tuple = int(input("""Enter number of edges: """).strip()) A__ : str = defaultdict(list) for _ in range(edges_number): A__ : Optional[int] = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
'''simple docstring''' from math import sqrt def UpperCamelCase__ ( _lowercase : int ) -> bool: assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' must been an int and positive" __UpperCAmelCase: int = True # 0 and 1 are none primes. if number <= 1: __UpperCAmelCase: Optional[Any] = False for divisor in range(2 , int(round(sqrt(_lowercase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __UpperCAmelCase: Any = False break # precondition assert isinstance(_lowercase , _lowercase ), "'status' must been from type bool" return status def UpperCamelCase__ ( _lowercase : Any ) -> Union[str, Any]: assert isinstance(_lowercase , _lowercase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __UpperCAmelCase: Tuple = list(range(2 , n + 1 ) ) __UpperCAmelCase: Dict = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_lowercase ) ): for j in range(i + 1 , len(_lowercase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __UpperCAmelCase: Union[str, Any] = 0 # filters actual prime numbers. 
__UpperCAmelCase: Dict = [x for x in begin_list if x != 0] # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list" return ans def UpperCamelCase__ ( _lowercase : Any ) -> str: assert isinstance(_lowercase , _lowercase ) and (n > 2), "'N' must been an int and > 2" __UpperCAmelCase: str = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_lowercase ): ans.append(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list" return ans def UpperCamelCase__ ( _lowercase : List[Any] ) -> List[Any]: assert isinstance(_lowercase , _lowercase ) and number >= 0, "'number' must been an int and >= 0" __UpperCAmelCase: Optional[int] = [] # this list will be returns of the function. # potential prime number factors. __UpperCAmelCase: str = 2 __UpperCAmelCase: int = number if number == 0 or number == 1: ans.append(_lowercase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_lowercase ): while quotient != 1: if is_prime(_lowercase ) and (quotient % factor == 0): ans.append(_lowercase ) quotient /= factor else: factor += 1 else: ans.append(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type list" return ans def UpperCamelCase__ ( _lowercase : Union[str, Any] ) -> Optional[Any]: assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __UpperCAmelCase: Union[str, Any] = 0 # prime factorization of 'number' __UpperCAmelCase: Optional[Any] = prime_factorization(_lowercase ) __UpperCAmelCase: List[str] = max(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type int" return ans def UpperCamelCase__ ( _lowercase : Dict ) -> Union[str, Any]: assert isinstance(_lowercase , _lowercase ) and ( number >= 0 ), "'number' bust been an int and 
>= 0" __UpperCAmelCase: Optional[int] = 0 # prime factorization of 'number' __UpperCAmelCase: Union[str, Any] = prime_factorization(_lowercase ) __UpperCAmelCase: Tuple = min(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ), "'ans' must been from type int" return ans def UpperCamelCase__ ( _lowercase : Union[str, Any] ) -> Dict: assert isinstance(_lowercase , _lowercase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _lowercase ), "compare bust been from type bool" return number % 2 == 0 def UpperCamelCase__ ( _lowercase : Dict ) -> Any: assert isinstance(_lowercase , _lowercase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _lowercase ), "compare bust been from type bool" return number % 2 != 0 def UpperCamelCase__ ( _lowercase : Optional[int] ) -> Optional[int]: assert ( isinstance(_lowercase , _lowercase ) and (number > 2) and is_even(_lowercase ) ), "'number' must been an int, even and > 2" __UpperCAmelCase: str = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __UpperCAmelCase: Union[str, Any] = get_prime_numbers(_lowercase ) __UpperCAmelCase: List[Any] = len(_lowercase ) # run variable for while-loops. __UpperCAmelCase: Union[str, Any] = 0 __UpperCAmelCase: Optional[int] = None # exit variable. for break up the loops __UpperCAmelCase: int = True while i < len_pn and loop: __UpperCAmelCase: Optional[int] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __UpperCAmelCase: Union[str, Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_lowercase , _lowercase ) and (len(_lowercase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def UpperCamelCase__ ( _lowercase : str , _lowercase : str ) -> List[Any]: assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __UpperCAmelCase: int = 0 while numbera != 0: __UpperCAmelCase: Union[str, Any] = numbera % numbera __UpperCAmelCase: Tuple = numbera __UpperCAmelCase: Union[str, Any] = rest # precondition assert isinstance(_lowercase , _lowercase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def UpperCamelCase__ ( _lowercase : str , _lowercase : Union[str, Any] ) -> Any: assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __UpperCAmelCase: Optional[int] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __UpperCAmelCase: Dict = prime_factorization(_lowercase ) __UpperCAmelCase: Tuple = prime_factorization(_lowercase ) elif numbera == 1 or numbera == 1: __UpperCAmelCase: int = [] __UpperCAmelCase: Dict = [] __UpperCAmelCase: Union[str, Any] = max(_lowercase , _lowercase ) __UpperCAmelCase: Optional[int] = 0 __UpperCAmelCase: List[Any] = 0 __UpperCAmelCase: str = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __UpperCAmelCase: Any = prime_fac_a.count(_lowercase ) __UpperCAmelCase: List[str] = prime_fac_a.count(_lowercase ) for _ in range(max(_lowercase , _lowercase ) ): ans *= n else: __UpperCAmelCase: List[str] = prime_fac_a.count(_lowercase ) for _ in range(_lowercase ): ans *= n done.append(_lowercase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __UpperCAmelCase: Optional[int] = 
prime_fac_a.count(_lowercase ) for _ in range(_lowercase ): ans *= n done.append(_lowercase ) # precondition assert isinstance(_lowercase , _lowercase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def UpperCamelCase__ ( _lowercase : Any ) -> Dict: assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'number' must been a positive int" __UpperCAmelCase: List[Any] = 0 __UpperCAmelCase: Any = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_lowercase ): ans += 1 # precondition assert isinstance(_lowercase , _lowercase ) and is_prime( _lowercase ), "'ans' must been a prime number and from type int" return ans def UpperCamelCase__ ( _lowercase : Dict , _lowercase : List[str] ) -> List[str]: assert ( is_prime(_lowercase ) and is_prime(_lowercase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __UpperCAmelCase: Optional[int] = p_number_a + 1 # jump to the next number __UpperCAmelCase: str = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(_lowercase ): number += 1 while number < p_number_a: ans.append(_lowercase ) number += 1 # fetch the next prime number. while not is_prime(_lowercase ): number += 1 # precondition assert ( isinstance(_lowercase , _lowercase ) and ans[0] != p_number_a and ans[len(_lowercase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def UpperCamelCase__ ( _lowercase : Any ) -> List[Any]: assert isinstance(_lowercase , _lowercase ) and (n >= 1), "'n' must been int and >= 1" __UpperCAmelCase: Any = [] # will be returned. 
for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_lowercase ) # precondition assert ans[0] == 1 and ans[len(_lowercase ) - 1] == n, "Error in function getDivisiors(...)" return ans def UpperCamelCase__ ( _lowercase : int ) -> List[str]: assert isinstance(_lowercase , _lowercase ) and ( number > 1 ), "'number' must been an int and >= 1" __UpperCAmelCase: Dict = get_divisors(_lowercase ) # precondition assert ( isinstance(_lowercase , _lowercase ) and (divisors[0] == 1) and (divisors[len(_lowercase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def UpperCamelCase__ ( _lowercase : int , _lowercase : List[str] ) -> Optional[Any]: assert ( isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __UpperCAmelCase: Dict = gcd(abs(_lowercase ) , abs(_lowercase ) ) # precondition assert ( isinstance(_lowercase , _lowercase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def UpperCamelCase__ ( _lowercase : List[Any] ) -> int: assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'n' must been a int and >= 0" __UpperCAmelCase: Dict = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def UpperCamelCase__ ( _lowercase : int ) -> List[Any]: assert isinstance(_lowercase , _lowercase ) and (n >= 0), "'n' must been an int and >= 0" __UpperCAmelCase: Dict = 0 __UpperCAmelCase: int = 1 __UpperCAmelCase: Dict = 1 # this will be return for _ in range(n - 1 ): __UpperCAmelCase: Tuple = ans ans += fiba __UpperCAmelCase: Tuple = tmp return ans
701
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { 'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json', } class a ( __lowerCAmelCase ): """simple docstring""" __lowerCAmelCase = """timesformer""" def __init__( self , snake_case_=224 , snake_case_=16 , snake_case_=3 , snake_case_=8 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3072 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0_2 , snake_case_=1e-6 , snake_case_=True , snake_case_="divided_space_time" , snake_case_=0 , **snake_case_ , ): '''simple docstring''' super().__init__(**snake_case_ ) __UpperCAmelCase: Tuple = image_size __UpperCAmelCase: List[Any] = patch_size __UpperCAmelCase: Optional[Any] = num_channels __UpperCAmelCase: int = num_frames __UpperCAmelCase: List[str] = hidden_size __UpperCAmelCase: List[str] = num_hidden_layers __UpperCAmelCase: Dict = num_attention_heads __UpperCAmelCase: List[str] = intermediate_size __UpperCAmelCase: Union[str, Any] = hidden_act __UpperCAmelCase: Dict = hidden_dropout_prob __UpperCAmelCase: Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase: Union[str, Any] = initializer_range __UpperCAmelCase: List[Any] = layer_norm_eps __UpperCAmelCase: int = qkv_bias __UpperCAmelCase: str = attention_type __UpperCAmelCase: Tuple = drop_path_rate
466
0
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCamelCase__ = '''src/transformers''' UpperCamelCase__ = '''docs/source/en''' UpperCamelCase__ = '''.''' def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase__ : Optional[Any] = f.readlines() # Find the start prompt. UpperCAmelCase__ : Union[str, Any] = 0 while not lines[start_index].startswith(lowerCAmelCase__ ): start_index += 1 start_index += 1 UpperCAmelCase__ : str = start_index while not lines[end_index].startswith(lowerCAmelCase__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCamelCase__ = '''Model|Encoder|Decoder|ForConditionalGeneration''' # Regexes that match TF/Flax/PT model names. UpperCamelCase__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') UpperCamelCase__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCamelCase__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # This is to make sure the transformers module imported is the one in the repo. 
UpperCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH) def a__ ( lowerCAmelCase__ ) -> Tuple: UpperCAmelCase__ : Optional[int] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , lowerCAmelCase__ ) return [m.group(0 ) for m in matches] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: UpperCAmelCase__ : int = 2 if text == '''✅''' or text == '''❌''' else len(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[int] = (width - text_length) // 2 UpperCAmelCase__ : List[Any] = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def a__ ( ) -> Dict: UpperCAmelCase__ : Optional[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES UpperCAmelCase__ : int = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } UpperCAmelCase__ : Union[str, Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. UpperCAmelCase__ : str = collections.defaultdict(lowerCAmelCase__ ) UpperCAmelCase__ : Tuple = collections.defaultdict(lowerCAmelCase__ ) UpperCAmelCase__ : Dict = collections.defaultdict(lowerCAmelCase__ ) UpperCAmelCase__ : Any = collections.defaultdict(lowerCAmelCase__ ) UpperCAmelCase__ : int = collections.defaultdict(lowerCAmelCase__ ) # Let's lookup through all transformers object (once). 
for attr_name in dir(lowerCAmelCase__ ): UpperCAmelCase__ : Optional[int] = None if attr_name.endswith('''Tokenizer''' ): UpperCAmelCase__ : List[Any] = slow_tokenizers UpperCAmelCase__ : Tuple = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): UpperCAmelCase__ : Dict = fast_tokenizers UpperCAmelCase__ : Tuple = attr_name[:-13] elif _re_tf_models.match(lowerCAmelCase__ ) is not None: UpperCAmelCase__ : List[Any] = tf_models UpperCAmelCase__ : Tuple = _re_tf_models.match(lowerCAmelCase__ ).groups()[0] elif _re_flax_models.match(lowerCAmelCase__ ) is not None: UpperCAmelCase__ : Optional[Any] = flax_models UpperCAmelCase__ : str = _re_flax_models.match(lowerCAmelCase__ ).groups()[0] elif _re_pt_models.match(lowerCAmelCase__ ) is not None: UpperCAmelCase__ : int = pt_models UpperCAmelCase__ : List[str] = _re_pt_models.match(lowerCAmelCase__ ).groups()[0] if lookup_dict is not None: while len(lowerCAmelCase__ ) > 0: if attr_name in model_name_to_prefix.values(): UpperCAmelCase__ : Tuple = True break # Try again after removing the last word in the name UpperCAmelCase__ : List[Any] = ''''''.join(camel_case_split(lowerCAmelCase__ )[:-1] ) # Let's build that table! UpperCAmelCase__ : List[Any] = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) UpperCAmelCase__ : str = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). 
UpperCAmelCase__ : Tuple = [len(lowerCAmelCase__ ) + 2 for c in columns] UpperCAmelCase__ : Any = max([len(lowerCAmelCase__ ) for name in model_names] ) + 2 # Build the table per se UpperCAmelCase__ : Union[str, Any] = '''|''' + '''|'''.join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for c, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" UpperCAmelCase__ : Dict = {True: '''✅''', False: '''❌'''} for name in model_names: UpperCAmelCase__ : Tuple = model_name_to_prefix[name] UpperCAmelCase__ : int = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(lowerCAmelCase__ , lowerCAmelCase__ ) for l, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )] ) + "|\n" return table def a__ ( lowerCAmelCase__=False ) -> Union[str, Any]: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = _find_text_in_file( filename=os.path.join(lowerCAmelCase__ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) UpperCAmelCase__ : int = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(lowerCAmelCase__ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. 
Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') UpperCamelCase__ = parser.parse_args() check_model_table(args.fix_and_overwrite)
75
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase__ = None lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase__ = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowercase (__lowerCamelCase ): _lowerCamelCase = VOCAB_FILES_NAMES _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase = ['''input_ids''', '''attention_mask'''] _lowerCamelCase = TaTokenizer _lowerCamelCase = [] def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]="</s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : List[Any]="<pad>" , UpperCAmelCase_ : Union[str, Any]=100 , UpperCAmelCase_ : Tuple=None , 
**UpperCAmelCase_ : List[Any] , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: UpperCamelCase__ : Any = [F'<extra_id_{i}>' for i in range(UpperCAmelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens UpperCamelCase__ : Union[str, Any] = len(set(filter(lambda UpperCAmelCase_: bool('extra_id_' in str(UpperCAmelCase_)) , UpperCAmelCase_))) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens') super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , ) UpperCamelCase__ : Union[str, Any] = vocab_file UpperCamelCase__ : Optional[Any] = False if not self.vocab_file else True UpperCamelCase__ : Tuple = extra_ids @staticmethod def __UpperCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: UpperCamelCase__ : Union[str, Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F' {pretrained_model_name_or_path} automatically truncating your input to' 
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCAmelCase_ , ) return max_model_length def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(UpperCAmelCase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return UpperCamelCase__ : Optional[Any] = os.path.join( UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_): copyfile(self.vocab_file , UpperCAmelCase_) logger.info(F'Copy vocab file to {out_vocab_file}') return (out_vocab_file,) def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : Optional[int] = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: UpperCamelCase__ : Any = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None): UpperCamelCase__ : List[Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def __UpperCamelCase ( self : Tuple): return list( set(filter(lambda UpperCAmelCase_: bool(re.search(R'<extra_id_\d+>' , UpperCAmelCase_)) is not None , self.additional_special_tokens))) 
def __UpperCamelCase ( self : Dict): return [self.convert_tokens_to_ids(UpperCAmelCase_) for token in self.get_sentinel_tokens()]
596
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def lowerCamelCase(self ): A_ : int = tempfile.mkdtemp() # fmt: off A_ : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on A_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) A_ : str = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } A_ : str = os.path.join(self.tmpdirname , lowerCAmelCase_ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCamelCase(self , **lowerCAmelCase_ ): return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCamelCase(self , **lowerCAmelCase_ ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def lowerCamelCase(self ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase(self ): A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Dict = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in 
image_inputs] return image_inputs def lowerCamelCase(self ): A_ : int = self.get_tokenizer() A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) A_ : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase_ ) def lowerCamelCase(self ): A_ : Dict = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A_ : Tuple = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 ) A_ : int = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase_ ) def lowerCamelCase(self ): A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : str = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) A_ : str = self.prepare_image_inputs() A_ : Union[str, Any] = image_processor(lowerCAmelCase_ , return_tensors="""np""" ) A_ : List[str] = processor(images=lowerCAmelCase_ , 
return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase(self ): A_ : Optional[Any] = self.get_image_processor() A_ : str = self.get_tokenizer() A_ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) A_ : Optional[int] = """lower newer""" A_ : str = processor(text=lowerCAmelCase_ ) A_ : Optional[int] = tokenizer(lowerCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCamelCase(self ): A_ : int = self.get_image_processor() A_ : int = self.get_tokenizer() A_ : int = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) A_ : Any = """lower newer""" A_ : Optional[int] = self.prepare_image_inputs() A_ : Dict = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(lowerCAmelCase_ ): processor() def lowerCamelCase(self ): A_ : Dict = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : Dict = processor.batch_decode(lowerCAmelCase_ ) A_ : Any = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCamelCase(self ): A_ : int = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) A_ : Union[str, Any] = """lower newer""" A_ : Dict = self.prepare_image_inputs() A_ : Tuple = processor(text=lowerCAmelCase_ , 
images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
480
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE : """simple docstring""" @staticmethod def lowerCamelCase(*lowerCAmelCase_ , **lowerCAmelCase_ ): pass @is_pipeline_test @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" _A : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): A_ : Union[str, Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) A_ : Union[str, Any] = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def lowerCamelCase(self , lowerCAmelCase_ , lowerCAmelCase_ ): A_ : str = vqa_pipeline(lowerCAmelCase_ , top_k=1 ) self.assertEqual( lowerCAmelCase_ , [ [{"""score""": ANY(lowerCAmelCase_ ), """answer""": ANY(lowerCAmelCase_ )}], [{"""score""": ANY(lowerCAmelCase_ ), """answer""": ANY(lowerCAmelCase_ )}], ] , ) @require_torch def lowerCamelCase(self ): A_ : Tuple = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) A_ : Any = """./tests/fixtures/tests_samples/COCO/000000039769.png""" A_ : Any = """How many cats are there?""" A_ : Optional[int] = vqa_pipeline(image=lowerCAmelCase_ , question="""How many cats are there?""" , top_k=2 ) self.assertEqual( lowerCAmelCase_ , [{"""score""": ANY(lowerCAmelCase_ ), 
"""answer""": ANY(lowerCAmelCase_ )}, {"""score""": ANY(lowerCAmelCase_ ), """answer""": ANY(lowerCAmelCase_ )}] ) A_ : Any = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( lowerCAmelCase_ , [{"""score""": ANY(lowerCAmelCase_ ), """answer""": ANY(lowerCAmelCase_ )}, {"""score""": ANY(lowerCAmelCase_ ), """answer""": ANY(lowerCAmelCase_ )}] ) @slow @require_torch def lowerCamelCase(self ): A_ : int = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" ) A_ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png""" A_ : int = """How many cats are there?""" A_ : List[Any] = vqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) A_ : Any = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) A_ : Tuple = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , ) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def lowerCamelCase(self ): pass
480
1
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL UpperCAmelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : tuple , SCREAMING_SNAKE_CASE_ : Path , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int=False , ) -> Union[str, Any]: """simple docstring""" output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE_ , output_names=SCREAMING_SNAKE_CASE_ , dynamic_axes=SCREAMING_SNAKE_CASE_ , do_constant_folding=SCREAMING_SNAKE_CASE_ , use_external_data_format=SCREAMING_SNAKE_CASE_ , enable_onnx_checker=SCREAMING_SNAKE_CASE_ , opset_version=SCREAMING_SNAKE_CASE_ , ) else: export( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE_ , output_names=SCREAMING_SNAKE_CASE_ , dynamic_axes=SCREAMING_SNAKE_CASE_ , do_constant_folding=SCREAMING_SNAKE_CASE_ , opset_version=SCREAMING_SNAKE_CASE_ , ) @torch.no_grad() def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False ) -> Any: """simple docstring""" _UpperCAmelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _UpperCAmelCase = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: _UpperCAmelCase = '''cpu''' 
_UpperCAmelCase = Path(SCREAMING_SNAKE_CASE_ ) # VAE DECODER _UpperCAmelCase = AutoencoderKL.from_pretrained(model_path + '''/vae''' ) _UpperCAmelCase = vae_decoder.config.latent_channels # forward only through the decoder part _UpperCAmelCase = vae_decoder.decode onnx_export( SCREAMING_SNAKE_CASE_ , model_args=( torch.randn(1 , SCREAMING_SNAKE_CASE_ , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=SCREAMING_SNAKE_CASE_ , ) del vae_decoder if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") UpperCAmelCase_ = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
32
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( snake_case ): lowerCamelCase_ = (CMStochasticIterativeScheduler,) lowerCamelCase_ = 1_0 def _UpperCAmelCase ( self : Any , **snake_case_ : Tuple ): """simple docstring""" A : str = { '''num_train_timesteps''': 201, '''sigma_min''': 0.0_02, '''sigma_max''': 80.0, } config.update(**snake_case_ ) return config def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" A : List[str] = 10 A : Dict = self.get_scheduler_config() A : Optional[int] = self.scheduler_classes[0](**snake_case_ ) scheduler.set_timesteps(snake_case_ ) A : List[str] = scheduler.timesteps[0] A : Any = scheduler.timesteps[1] A : int = self.dummy_sample A : str = 0.1 * sample A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample A : Tuple = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _UpperCAmelCase ( self : Dict ): """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=snake_case_ ) def _UpperCAmelCase ( self : Dict ): """simple docstring""" for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=snake_case_ ) def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" A : List[Any] = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config() A : List[str] = scheduler_class(**snake_case_ ) A : str = 1 scheduler.set_timesteps(snake_case_ ) A : Optional[int] = scheduler.timesteps A : int = torch.manual_seed(0 ) A : Optional[Any] = self.dummy_model() A : int = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(snake_case_ ): # 1. scale model input A : Dict = scheduler.scale_model_input(snake_case_ , snake_case_ ) # 2. 
predict noise residual A : List[Any] = model(snake_case_ , snake_case_ ) # 3. predict previous sample x_t-1 A : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample A : Union[str, Any] = pred_prev_sample A : List[str] = torch.sum(torch.abs(snake_case_ ) ) A : int = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" A : Tuple = self.scheduler_classes[0] A : Tuple = self.get_scheduler_config() A : str = scheduler_class(**snake_case_ ) A : Optional[int] = [106, 0] scheduler.set_timesteps(timesteps=snake_case_ ) A : Optional[int] = scheduler.timesteps A : Any = torch.manual_seed(0 ) A : Tuple = self.dummy_model() A : str = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input A : Tuple = scheduler.scale_model_input(snake_case_ , snake_case_ ) # 2. predict noise residual A : str = model(snake_case_ , snake_case_ ) # 3. 
predict previous sample x_t-1 A : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample A : str = pred_prev_sample A : str = torch.sum(torch.abs(snake_case_ ) ) A : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def _UpperCAmelCase ( self : int ): """simple docstring""" A : Optional[int] = self.scheduler_classes[0] A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**snake_case_ ) A : Union[str, Any] = [39, 30, 12, 15, 0] with self.assertRaises(snake_case_ , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=snake_case_ ) def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" A : List[str] = self.scheduler_classes[0] A : Dict = self.get_scheduler_config() A : Tuple = scheduler_class(**snake_case_ ) A : Any = [39, 30, 12, 1, 0] A : List[Any] = len(snake_case_ ) with self.assertRaises(snake_case_ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ ) def _UpperCAmelCase ( self : Dict ): """simple docstring""" A : List[Any] = self.scheduler_classes[0] A : str = self.get_scheduler_config() A : List[Any] = scheduler_class(**snake_case_ ) A : Dict = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=snake_case_ )
256
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) class _a ( __a ): __a : Dict = """encoder-decoder""" __a : Optional[int] = True def __init__( self : Optional[int] , **lowercase : List[str] ): '''simple docstring''' super().__init__(**lowercase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" UpperCAmelCase = kwargs.pop('''encoder''' ) UpperCAmelCase = encoder_config.pop('''model_type''' ) UpperCAmelCase = kwargs.pop('''decoder''' ) UpperCAmelCase = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig UpperCAmelCase = AutoConfig.for_model(lowercase , **lowercase ) UpperCAmelCase = AutoConfig.for_model(lowercase , **lowercase ) UpperCAmelCase = True @classmethod def A ( cls : Optional[int] , lowercase : PretrainedConfig , lowercase : PretrainedConfig , **lowercase : Tuple ): '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) UpperCAmelCase = True UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = copy.deepcopy(self.__dict__ ) UpperCAmelCase = self.encoder.to_dict() UpperCAmelCase = self.decoder.to_dict() UpperCAmelCase = self.__class__.model_type return output
707
'''simple docstring''' from __future__ import annotations import requests def snake_case_ (_a : str ): UpperCAmelCase = F"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty" return requests.get(_a ).json() def snake_case_ (_a : int = 1_0 ): UpperCAmelCase = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' UpperCAmelCase = requests.get(_a ).json()[:max_stories] return [get_hackernews_story(_a ) for story_id in story_ids] def snake_case_ (_a : int = 1_0 ): UpperCAmelCase = hackernews_top_stories(_a ) return "\n".join('''* [{title}]({url})'''.format(**_a ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
358
0
"""simple docstring""" import qiskit def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[Any] = 2 ): """simple docstring""" snake_case : List[Any] = qubits # Using Aer's simulator snake_case : Any = qiskit.Aer.get_backend("aer_simulator" ) # Creating a Quantum Circuit acting on the q register snake_case : Tuple = qiskit.QuantumCircuit(lowerCAmelCase__ , lowerCAmelCase__ ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , lowerCAmelCase__ ): # Adding CX (CNOT) gate circuit.cx(i - 1 , lowerCAmelCase__ ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(lowerCAmelCase__ ) ) , list(range(lowerCAmelCase__ ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator snake_case : str = qiskit.execute(lowerCAmelCase__ , lowerCAmelCase__ , shots=1_0_0_0 ) return job.result().get_counts(lowerCAmelCase__ ) if __name__ == "__main__": print(F'''Total count for various states are: {quantum_entanglement(3)}''')
449
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __A : '''simple docstring''' @staticmethod def SCREAMING_SNAKE_CASE__ ( *_snake_case , **_snake_case ): pass def UpperCamelCase_ ( lowerCAmelCase__ ): """simple docstring""" _lowerCAmelCase : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __A ( unittest.TestCase ): '''simple docstring''' a_ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case ): _lowerCAmelCase : int = DepthEstimationPipeline(model=_snake_case , image_processor=_snake_case ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ): _lowerCAmelCase : List[Any] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , _snake_case ) import datasets _lowerCAmelCase : Union[str, Any] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) _lowerCAmelCase : int = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor 
), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , _snake_case , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def SCREAMING_SNAKE_CASE__ ( self ): pass @slow @require_torch def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : str = "Intel/dpt-large" _lowerCAmelCase : int = pipeline("depth-estimation" , model=_snake_case ) _lowerCAmelCase : Any = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) _lowerCAmelCase : int = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def SCREAMING_SNAKE_CASE__ ( self ): # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
424
0
'''simple docstring''' from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
39
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None 
else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int] ): '''simple docstring''' __lowerCAmelCase = {"col_2": "int64", "col_3": "float64", "col_1": "string"} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() assert isinstance(lowerCamelCase , lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, 
NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def __lowerCAmelCase ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): '''simple docstring''' if issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase , lowerCamelCase ): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_dataset(lowerCamelCase , lowerCamelCase ) def __lowerCAmelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : str=("train",) ): '''simple docstring''' assert isinstance(lowerCamelCase , lowerCamelCase ) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __lowerCAmelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : List[str] ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else 
assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ): '''simple docstring''' __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({"train": jsonl_path} , features=lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def __lowerCAmelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int] , lowerCamelCase : int ): '''simple docstring''' if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = "train" __lowerCAmelCase = {"train": jsonl_path, "test": jsonl_path} __lowerCAmelCase = tmp_path / "cache" __lowerCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase ).read() _check_json_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( lowerCamelCase : Optional[Any] ): '''simple docstring''' return json.load(lowerCamelCase ) def __lowerCAmelCase ( 
lowerCamelCase : Tuple ): '''simple docstring''' return [json.loads(lowerCamelCase ) for line in buffer] class UpperCAmelCase__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , 
num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCAmelCase_ ( self , UpperCamelCase ) -> Any: with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: __lowerCAmelCase = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' __lowerCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase 
, "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() with fsspec.open(UpperCamelCase , "rb" , compression="infer" ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
39
1
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy generator weights from an original HiFi-GAN checkpoint into the HF model.

    Args:
        checkpoint: state dict of the original generator (``model.generator`` sub-dict).
        hf_model: a ``SpeechTaHifiGan`` instance to populate in place.
        config: the matching ``SpeechTaHifiGanConfig`` (provides layer counts).
    """
    # Weight norm must be active while copying the g/v decomposed tensors.
    hf_model.apply_weight_norm()

    # NOTE(review): the mangled source discarded the assignment targets; they are
    # restored here per the official SpeechT5 HiFi-GAN conversion script — confirm.
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    # Fold g/v back into plain weights now that everything has been copied.
    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Convert an original HiFi-GAN checkpoint to a 🤗 ``SpeechTaHifiGan`` model.

    Args:
        checkpoint_path: path to the original ``torch.save``-ed checkpoint.
        stats_path: path to the ``stats.npy`` file holding mean/scale statistics.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional path to an hf ``config.json``; defaults to a fresh config.
        repo_id: optional hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy stores [mean, scale] used to de-normalize input log-mel spectrograms.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
524
"""Tests that the deprecated metric APIs emit the expected deprecation warning."""

import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    # Reset the de-duplication set so the warning fires again in every test.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the hub client so no network call is made when listing metrics."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    # Substitute the "tmp_path" placeholder with the real pytest tmp_path fixture.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # NOTE(review): the mangled source passed an undefined name to pytest.warns;
    # FutureWarning is what datasets' deprecation helper raises — confirm.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
494
0
"""simple docstring""" from __future__ import annotations from statistics import mean def _lowerCAmelCase ( __lowerCamelCase:Union[str, Any] , __lowerCamelCase:Tuple , __lowerCamelCase:List[Any] ): '''simple docstring''' __magic_name__ = [0] * no_of_processes __magic_name__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(_lowercase ): __magic_name__ = burst_time[i] __magic_name__ = [] __magic_name__ = 0 __magic_name__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: __magic_name__ = [] __magic_name__ = -1 for i in range(_lowercase ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(_lowercase ) if len(_lowercase ) > 0: __magic_name__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: __magic_name__ = i total_time += burst_time[target_process] completed += 1 __magic_name__ = 0 __magic_name__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def _lowerCAmelCase ( __lowerCamelCase:Tuple , __lowerCamelCase:Union[str, Any] , __lowerCamelCase:Union[str, Any] ): '''simple docstring''' __magic_name__ = [0] * no_of_processes for i in range(_lowercase ): __magic_name__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('''[TEST CASE 01]''') lowercase = 4 lowercase = [2, 5, 3, 7] lowercase = [0, 0, 0, 0] lowercase = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowercase = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''') for i, process_id in enumerate(list(range(1, 5))): print( 
f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''') print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
712
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A_ ( snake_case_ ): UpperCAmelCase__ = (UnCLIPScheduler,) def _snake_case ( self : Any , **__lowerCamelCase : List[Any] ) -> str: __magic_name__ = { "num_train_timesteps": 1_0_0_0, "variance_type": "fixed_small_log", "clip_sample": True, "clip_sample_range": 1.0, "prediction_type": "epsilon", } config.update(**__lowerCamelCase ) return config def _snake_case ( self : int ) -> Tuple: for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def _snake_case ( self : int ) -> Any: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__lowerCamelCase ) def _snake_case ( self : List[Any] ) -> Tuple: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCamelCase ) def _snake_case ( self : Tuple ) -> Optional[int]: for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=__lowerCamelCase ) def _snake_case ( self : List[Any] ) -> Any: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ) -> List[str]: for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase ) def _snake_case ( self : Union[str, Any] ) -> Tuple: __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config(variance_type="fixed_small_log" ) __magic_name__ = scheduler_class(**__lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1e-5 def 
_snake_case ( self : Union[str, Any] ) -> Optional[Any]: __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config(variance_type="learned_range" ) __magic_name__ = scheduler_class(**__lowerCamelCase ) __magic_name__ = 0.5 assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.171_2790 < 1e-5 assert scheduler._get_variance(4_8_7 , predicted_variance=__lowerCamelCase ) - -5.799_8052 < 1e-5 assert scheduler._get_variance(9_9_9 , predicted_variance=__lowerCamelCase ) - -0.001_0011 < 1e-5 def _snake_case ( self : Optional[Any] ) -> str: __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config() __magic_name__ = scheduler_class(**__lowerCamelCase ) __magic_name__ = scheduler.timesteps __magic_name__ = self.dummy_model() __magic_name__ = self.dummy_sample_deter __magic_name__ = torch.manual_seed(0 ) for i, t in enumerate(__lowerCamelCase ): # 1. predict noise residual __magic_name__ = model(__lowerCamelCase , __lowerCamelCase ) # 2. predict previous mean of sample x_t-1 __magic_name__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample __magic_name__ = pred_prev_sample __magic_name__ = torch.sum(torch.abs(__lowerCamelCase ) ) __magic_name__ = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 252.268_2495 ) < 1e-2 assert abs(result_mean.item() - 0.328_4743 ) < 1e-3 def _snake_case ( self : int ) -> int: __magic_name__ = self.scheduler_classes[0] __magic_name__ = self.get_scheduler_config() __magic_name__ = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(2_5 ) __magic_name__ = scheduler.timesteps __magic_name__ = self.dummy_model() __magic_name__ = self.dummy_sample_deter __magic_name__ = torch.manual_seed(0 ) for i, t in enumerate(__lowerCamelCase ): # 1. 
predict noise residual __magic_name__ = model(__lowerCamelCase , __lowerCamelCase ) if i + 1 == timesteps.shape[0]: __magic_name__ = None else: __magic_name__ = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __magic_name__ = scheduler.step( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample __magic_name__ = pred_prev_sample __magic_name__ = torch.sum(torch.abs(__lowerCamelCase ) ) __magic_name__ = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 258.204_4983 ) < 1e-2 assert abs(result_mean.item() - 0.336_2038 ) < 1e-3 def _snake_case ( self : List[str] ) -> Any: pass def _snake_case ( self : int ) -> Optional[int]: pass
468
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the mangled source reused one name for both the logger and this
# map, clobbering the logger; distinct names restored.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    Stores hyperparameters of the transformer encoder plus the spectrogram
    patching parameters (strides, max length, number of mel bins).
    """

    model_type = 'audio-spectrogram-transformer'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder hyperparameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Spectrogram patch-embedding parameters.
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
79
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class UpperCAmelCase(TrainingArguments):
    """Training arguments for sequence-to-sequence training.

    Extends ``TrainingArguments`` with generation-time options used when
    ``predict_with_generate`` is enabled.
    """

    # NOTE(review): the mangled source gave every field the same name `a` with an
    # undefined default; field names/defaults restored per the seq2seq convention.
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serialize to a plain dict, expanding nested ``GenerationConfig`` values."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
397
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def __UpperCamelCase ( a : int , a : Tuple=False ) ->str: snake_case = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', 
'''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def __UpperCamelCase ( a : str , a : Any , a : Optional[Any]=False ) ->int: for i in range(config.num_hidden_layers ): if base_model: snake_case = '''''' else: snake_case = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case = in_proj_weight[ : config.hidden_size, : ] snake_case = in_proj_bias[: config.hidden_size] snake_case = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case = in_proj_weight[ -config.hidden_size :, : ] snake_case = in_proj_bias[-config.hidden_size :] def __UpperCamelCase ( a : Union[str, Any] ) ->List[str]: snake_case = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a , a ) def __UpperCamelCase ( a : Optional[Any] , a : Tuple , a : Optional[Any] ) ->Optional[int]: snake_case = dct.pop(a ) snake_case = val def __UpperCamelCase ( ) ->List[str]: snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg''' 
snake_case = Image.open(requests.get(a , stream=a ).raw ) return im @torch.no_grad() def __UpperCamelCase ( a : Any , a : Any ) ->Dict: snake_case = ViTConfig() snake_case = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": snake_case = True snake_case = int(vit_name[-12:-10] ) snake_case = int(vit_name[-9:-6] ) else: snake_case = 1000 snake_case = '''huggingface/label-files''' snake_case = '''imagenet-1k-id2label.json''' snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) snake_case = {int(a ): v for k, v in idalabel.items()} snake_case = idalabel snake_case = {v: k for k, v in idalabel.items()} snake_case = int(vit_name[-6:-4] ) snake_case = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): snake_case = 192 snake_case = 768 snake_case = 12 snake_case = 3 elif vit_name[9:].startswith('''small''' ): snake_case = 384 snake_case = 1536 snake_case = 12 snake_case = 6 else: pass else: if vit_name[4:].startswith('''small''' ): snake_case = 768 snake_case = 2304 snake_case = 8 snake_case = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): snake_case = 1024 snake_case = 4096 snake_case = 24 snake_case = 16 elif vit_name[4:].startswith('''huge''' ): snake_case = 1280 snake_case = 5120 snake_case = 32 snake_case = 16 # load original model from timm snake_case = timm.create_model(a , pretrained=a ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case = timm_model.state_dict() if base_model: remove_classification_head_(a ) snake_case = create_rename_keys(a , a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a , a ) # load HuggingFace model if vit_name[-5:] == "in21k": snake_case = ViTModel(a ).eval() else: snake_case = ViTForImageClassification(a ).eval() model.load_state_dict(a ) # Check 
outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: snake_case = DeiTImageProcessor(size=config.image_size ) else: snake_case = ViTImageProcessor(size=config.image_size ) snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ) snake_case = encoding['''pixel_values'''] snake_case = model(a ) if base_model: snake_case = timm_model.forward_features(a ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(a , outputs.pooler_output , atol=1e-3 ) else: snake_case = timm_model(a ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(a , outputs.logits , atol=1e-3 ) Path(a ).mkdir(exist_ok=a ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) _lowercase = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
44
"""Tabu search for the (symmetric) travelling-salesman problem.

The input file has one edge per line: ``node_a node_b distance``.
"""

import argparse
import copy


def generate_neighbours(path):
    """Parse the edge file at *path* into an adjacency dict.

    Returns:
        dict mapping each node to a list of ``[neighbour, distance]`` pairs
        (distances kept as strings, as read from the file).
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            # Record the edge in both directions (graph is undirected).
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append([line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append([line.split()[0], line.split()[2]])
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting at the file's first node.

    Returns:
        (first_solution, distance_of_first_solution): the tour as a node list
        (start node repeated at the end) and its total length.
    """
    with open(path) as f:
        start_node = f.read(1)  # first character of the file names the start node
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel "infinite" distance
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    # Close the tour back at the start node.
    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # Replace the sentinel added on the last hop with the true closing edge.
    distance_of_first_solution = (
        distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """Return all 2-swap neighbours of *solution*, each with its cost appended.

    The returned list is sorted ascending by that trailing cost element.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            # Swap the two cities in a deep copy of the tour.
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            # Cost of the candidate tour is carried as its last element.
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for *iters* iterations with a tabu list of length *size*.

    Returns:
        (best_solution_ever, best_cost): the best tour found and its length.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # Identify the first position where the candidate differs: that pair
            # of cities is the move considered for the tabu list.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended cost element
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    """CLI entry point: read the edge file and report the best tour found."""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s',
        '--Size',
        type=int,
        help='Size of the tabu list',
        required=True,
    )
    # Pass the arguments to main method
    main(parser.parse_args())
44
1
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self: int , __A: Tuple , __A: List[str]=3 , __A: Tuple=32 , __A: str=3 , __A: List[str]=10 , __A: Optional[Any]=[10, 20, 30, 40] , __A: Tuple=[1, 1, 2, 1] , __A: Tuple=True , __A: List[str]=True , __A: Optional[int]="relu" , __A: Optional[int]=3 , __A: Any=None , ) -> List[Any]: _A = parent _A = batch_size _A = image_size _A = num_channels _A = embeddings_size _A = hidden_sizes _A = depths _A = is_training _A = use_labels _A = hidden_act _A = num_labels _A = scope _A = len(__A ) def __A ( self: List[Any] ) -> Tuple: _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels def __A ( self: Optional[Any] ) -> Optional[int]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __A ( self: List[str] , __A: Optional[int] , __A: Dict , __A: Union[str, Any] ) -> Dict: _A = TFRegNetModel(config=__A ) _A = model(__A , training=__A ) # expected last hidden 
states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __A ( self: int , __A: List[Any] , __A: Dict , __A: Dict ) -> Optional[Any]: _A = self.num_labels _A = TFRegNetForImageClassification(__A ) _A = model(__A , labels=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self: Union[str, Any] ) -> Tuple: _A = self.prepare_config_and_inputs() _A ,_A ,_A = config_and_inputs _A = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE ( snake_case , snake_case , unittest.TestCase ): """simple docstring""" A_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () A_ = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def __A ( self: Any ) -> Optional[int]: _A = TFRegNetModelTester(self ) _A = ConfigTester(self , config_class=__A , has_text_modality=__A ) def __A ( self: Dict ) -> List[str]: return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __A ( self: Dict ) -> int: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def __A ( self: Optional[int] ) -> int: super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __A ( self: Any ) -> Tuple: pass def __A ( self: Dict ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__A ) _A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = 
['''pixel_values'''] self.assertListEqual(arg_names[:1] , __A ) def __A ( self: List[Any] ) -> Optional[int]: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __A ( self: Optional[int] ) -> Dict: def check_hidden_states_output(__A: int , __A: List[Any] , __A: Dict ): _A = model_class(__A ) _A = model(**self._prepare_for_class(__A , __A ) , training=__A ) _A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _A = self.model_tester.num_stages self.assertEqual(len(__A ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() _A = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: _A = layer_type _A = True check_hidden_states_output(__A , __A , __A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(__A , __A , __A ) def __A ( self: Optional[Any] ) -> Optional[Any]: _A ,_A = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__A: Any , __A: List[Any] , __A: int , __A: Union[str, Any]={} ): _A = model(__A , return_dict=__A , **__A ) _A = model(__A , return_dict=__A , **__A ).to_tuple() def recursive_check(__A: Optional[Any] , __A: List[str] ): if isinstance(__A , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__A , __A ): recursive_check(__A , __A ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__A , __A ) ) , msg=( '''Tuple and dict output are not equal. 
Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__A , __A ) for model_class in self.all_model_classes: _A = model_class(__A ) _A = self._prepare_for_class(__A , __A ) _A = self._prepare_for_class(__A , __A ) check_equivalence(__A , __A , __A ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) check_equivalence(__A , __A , __A ) _A = self._prepare_for_class(__A , __A ) _A = self._prepare_for_class(__A , __A ) check_equivalence(__A , __A , __A , {'''output_hidden_states''': True} ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) _A = self._prepare_for_class(__A , __A , return_labels=__A ) check_equivalence(__A , __A , __A , {'''output_hidden_states''': True} ) def __A ( self: Optional[int] ) -> Dict: _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def __A ( self: Union[str, Any] ) -> List[Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = TFRegNetModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __A ( ): '''simple docstring''' _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self: Optional[int] ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __A ( self: Union[str, Any] ) -> Union[str, Any]: _A = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__A , return_tensors='''tf''' ) # forward pass _A = model(**__A , training=__A ) # verify the logits _A = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , __A ) _A = 
tf.constant([-0.4_180, -1.5_051, -3.4_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 )
484
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

# indentation used for every generated JSON file
json_indent = 2


class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integer ids.

    The class name must stay `Dictionary`: the conversion code below calls
    `Dictionary.load(...)`.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        # special symbols always occupy the first ids, in this order
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a `<symbol> <count>` text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word (or bump its count) and return its id."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # no header lines in the plain-text format
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file (path or file object)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    """Convert fairseq BPE vocab keys to GPT-2-style keys.

    (1) strip the `@@` continuation marker, (2) append `</w>` to word-final
    tokens, then restore the four special tokens unchanged.
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT dump dir into a Hugging Face model directory.

    Args:
        biogpt_checkpoint_path: dir containing `checkpoint.pt`, `dict.txt`, `bpecodes`.
        pytorch_dump_folder_path: output dir (created if missing).

    Raises:
        ValueError: when any of the expected input files is missing.
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq keys to the HF BioGpt layout
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
484
1
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` to `dirpath`, removing any stale config/weights first."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dim."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0*log(0) = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor as a tab-separated layer-by-head table."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    (importance per Michel et al., http://arxiv.org/abs/1905.10650).

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # scatter ranks: most important head gets rank 0
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the score drops
    below `masking_threshold * original_score` (Michel et al., 2019)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked head weights and compare score/speed with masking."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapsed a single pruned head to a scalar; re-wrap it
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)

    save_model(model, args.output_dir)


def main():
    """CLI entry point: load GPT-2, compute head importance, optionally mask+prune."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
721
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class __UpperCAmelCase( unittest.TestCase ):
    """Unit tests for the summarization pre-processing helpers in `utils_summarization`:
    truncate_or_pad, process_story, build_mask and compute_token_type_ids."""

    # NOTE(review): every test method below is named `UpperCAmelCase` (obfuscated source).
    # Later `def`s with the same name shadow earlier ones, so unittest only ever sees the
    # LAST method — the earlier bodies are dead code as written. The bodies also reference
    # placeholder names (`__magic_name__`, `expected`) that are never bound in this scope.
    # Restoring distinct `test_*` names and real locals would re-enable the suite; confirm
    # against the un-obfuscated upstream file before running.

    def UpperCAmelCase ( self ):
        # Presumably a `setUp`: fixes the block size used by the padding tests below.
        A_ : str = 10

    def UpperCAmelCase ( self ):
        # truncate_or_pad should right-pad a short sequence with the pad value (0)
        # up to self.block_size elements.
        A_ : Optional[Any] = [1, 2, 3, 4]
        A_ : Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(__magic_name__ , self.block_size , 0 ) , __magic_name__ )

    def UpperCAmelCase ( self ):
        # A sequence already exactly at block size must be returned unchanged.
        A_ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        A_ : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(__magic_name__ , self.block_size , 0 ) , __magic_name__ )

    def UpperCAmelCase ( self ):
        # A sequence longer than block size (13 items) must be truncated to block size (10).
        A_ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        A_ : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(__magic_name__ , self.block_size , 0 ) , __magic_name__ )

    def UpperCAmelCase ( self ):
        # A story containing no `@highlight` marker yields an empty summary list.
        A_ : Optional[Any] = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.'''
        A_ , A_ : Optional[Any] = process_story(__magic_name__ )
        self.assertEqual(__magic_name__ , [] )

    def UpperCAmelCase ( self ):
        # An empty story yields both empty story lines and an empty summary.
        A_ : int = ''''''
        A_ , A_ : Union[str, Any] = process_story(__magic_name__ )
        self.assertEqual(__magic_name__ , [] )
        self.assertEqual(__magic_name__ , [] )

    def UpperCAmelCase ( self ):
        # `@highlight` sections are split out of the story body as summary lines,
        # and sentences get a terminating period appended by process_story.
        A_ : int = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        A_ , A_ : Optional[int] = process_story(__magic_name__ )
        A_ : List[str] = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(__magic_name__ , __magic_name__ )
        A_ : Union[str, Any] = ['''It was the best of times.''']
        self.assertEqual(__magic_name__ , __magic_name__ )

    def UpperCAmelCase ( self ):
        # build_mask with pad value 0: a pad-free tensor is fully unmasked (all ones).
        A_ : List[str] = torch.tensor([1, 2, 3, 4] )
        A_ : Any = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(__magic_name__ , 0 ).numpy() , expected.numpy() )

    def UpperCAmelCase ( self ):
        # build_mask zeroes out the trailing positions equal to the pad token (23 here).
        A_ : Any = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        A_ : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(__magic_name__ , 23 ).numpy() , expected.numpy() )

    def UpperCAmelCase ( self ):
        # Per the expected mask, only the trailing run of pad tokens (1s at the end) is
        # masked even though the same value could appear mid-sequence.
        A_ : Optional[int] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        A_ : Union[str, Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(__magic_name__ , 1 ).numpy() , expected.numpy() )

    def UpperCAmelCase ( self ):
        # compute_token_type_ids flips the segment id at each separator token (101),
        # per the expected alternating 1/0 pattern below.
        A_ : int = 101
        A_ : List[str] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        A_ : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        A_ : Optional[Any] = compute_token_type_ids(__magic_name__ , __magic_name__ )
        np.testing.assert_array_equal(__magic_name__ , __magic_name__ )
236
0
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


# NOTE(review): this module is obfuscated — class/method names are placeholders
# (`SCREAMING_SNAKE_CASE__`, `a_`), every local is assigned to the same name, and bodies
# reference names (`parent`, `input_ids`, `model`, `result`, ...) that are never bound in
# scope. Comments below describe the evident intent; confirm against the un-obfuscated
# upstream test_modeling_tf_esm.py before executing.
class SCREAMING_SNAKE_CASE__ :
    """Model tester: builds small EsmConfig/inputs and runs shape checks on TF ESM models."""

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , ) -> int:
        # Hyper-parameters for a tiny ESM model used by the shape tests.
        # Presumably: batch_size=13, seq_length=7, vocab_size=99, hidden_size=32,
        # num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, etc.
        a_ : Tuple = parent
        a_ : Optional[Any] = 1_3
        a_ : Tuple = 7
        a_ : List[Any] = True
        a_ : Optional[Any] = True
        a_ : Any = True
        a_ : List[Any] = 9_9
        a_ : List[str] = 3_2
        a_ : str = 2
        a_ : Any = 4
        a_ : int = 3_7
        a_ : Union[str, Any] = 'gelu'
        a_ : List[str] = 0.1
        a_ : int = 0.1
        a_ : List[str] = 5_1_2
        a_ : Any = 1_6
        a_ : Tuple = 2
        a_ : Tuple = 0.02
        a_ : List[str] = 3
        a_ : Union[str, Any] = 4
        a_ : Any = None

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        # Build random input ids / masks / labels plus a matching EsmConfig.
        a_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a_ : Union[str, Any] = None
        if self.use_input_mask:
            a_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        a_ : Union[str, Any] = None
        a_ : Dict = None
        a_ : int = None
        if self.use_labels:
            a_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a_ : Any = ids_tensor([self.batch_size] , self.num_choices )
        a_ : List[str] = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
        # Decoder variant of the above: additionally returns encoder hidden states
        # and an encoder attention mask for cross-attention tests.
        (
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
        ) : Any = self.prepare_config_and_inputs()
        a_ : Optional[int] = True
        a_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
        # TFEsmModel accepts dict, list and positional call conventions; all must yield
        # a last_hidden_state of shape (batch, seq, hidden).
        a_ : Optional[int] = TFEsmModel(config=SCREAMING_SNAKE_CASE__ )
        a_ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
        a_ : str = model(SCREAMING_SNAKE_CASE__ )
        a_ : List[str] = [input_ids, input_mask]
        a_ : Any = model(SCREAMING_SNAKE_CASE__ )
        a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[int]:
        # Decoder mode: the model must accept encoder hidden states / attention mask,
        # both via a dict and via keyword arguments.
        a_ : Union[str, Any] = True
        a_ : Dict = TFEsmModel(config=SCREAMING_SNAKE_CASE__ )
        a_ : str = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        a_ : int = model(SCREAMING_SNAKE_CASE__ )
        a_ : Optional[Any] = [input_ids, input_mask]
        a_ : Dict = model(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ )
        # Also check the case where encoder outputs are not passed
        a_ : List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
        # Masked-LM head: logits shaped (batch, seq, vocab).
        a_ : str = TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE__ )
        a_ : Tuple = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
        # Token-classification head: logits shaped (batch, seq, num_labels).
        a_ : List[str] = self.num_labels
        a_ : Optional[Any] = TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE__ )
        a_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
        a_ : Any = model(SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
        # Common-test entry point: returns (config, {'input_ids', 'attention_mask'}).
        a_ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
            ( a_ ) ,
        ) : List[str] = config_and_inputs
        a_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , unittest.TestCase ):
    """Common TF model tests wired up for the ESM architecture (mixin bases are
    presumably TFModelTesterMixin and PipelineTesterMixin — obfuscated here)."""

    # All ESM TF model classes exercised by the common tests.
    snake_case__ : Tuple = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    snake_case__ : Dict = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    snake_case__ : Any = False
    snake_case__ : Any = False

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        # Presumably setUp: create the model tester and a ConfigTester for EsmConfig.
        a_ : Tuple = TFEsmModelTester(self )
        a_ : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )

    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
        a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
        a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        a_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
        a_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ )

    @slow
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ : Optional[int] = TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )

    @unittest.skip('Protein models do not support embedding resizing.' )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
        pass

    @unittest.skip('Protein models do not support embedding resizing.' )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        pass

    def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        # Input embeddings must be a Keras layer; for the MLM head the output "embedding"
        # is a bias dict of tf.Variables, for the others there is no output embedding/bias.
        a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                a_ : Dict = model.get_bias()
                assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                for k, v in name.items():
                    assert isinstance(SCREAMING_SNAKE_CASE__ , tf.Variable )
            else:
                a_ : Union[str, Any] = model.get_output_embeddings()
                assert x is None
                a_ : List[str] = model.get_bias()
                assert name is None


@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests against the real facebook/esm2_t6_8M_UR50D checkpoint."""

    @slow
    def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
        # Masked-LM forward pass: check output shape [1, 6, 33] and a 3x3 logits slice.
        a_ : Optional[int] = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        a_ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
        a_ : str = model(SCREAMING_SNAKE_CASE__ )[0]
        a_ : List[str] = [1, 6, 3_3]
        self.assertEqual(list(output.numpy().shape ) , SCREAMING_SNAKE_CASE__ )
        # compare the actual values for a slice.
        a_ : Dict = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )

    @slow
    def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
        # Base-model forward pass: check a 3x3 slice of the last hidden state.
        a_ : Dict = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        a_ : List[Any] = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
        a_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )[0]
        # compare the actual values for a slice.
        a_ : int = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
570
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wavaveca.test_feature_extraction_wavaveca import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
    from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput


if is_torch_available():
    from transformers import WavaVecaForCTC


# NOTE(review): obfuscated source — class/method names are placeholders, every local is
# assigned to the same `a_`, and bodies reference names (`processor`, `tokenizer`,
# `decoder`, ...) never bound in scope. Comments describe the evident intent; confirm
# against the un-obfuscated upstream test_processor_wav2vec2_with_lm.py before executing.
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for Wav2Vec2ProcessorWithLM: save/load round-trips, feature-extractor and
    tokenizer delegation, beam-search decoding (single, batched, pooled) and word offsets."""

    def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
        # Presumably setUp: write a tiny vocab + feature-extractor config into a temp
        # dir, and record the hub id of a small n-gram beam-search decoder.
        a_ : List[str] = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
        a_ : Dict = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
        a_ : int = {
            'unk_token': '<unk>',
            'bos_token': '<s>',
            'eos_token': '</s>',
        }
        a_ : Optional[int] = {
            'feature_size': 1,
            'padding_value': 0.0,
            'sampling_rate': 1_6_0_0_0,
            'return_attention_mask': False,
            'do_normalize': True,
        }
        a_ : List[str] = tempfile.mkdtemp()
        a_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        a_ : List[str] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )
        with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )

        # load decoder from hub
        a_ : int = 'hf-internal-testing/ngram-beam-search-decoder'

    def SCREAMING_SNAKE_CASE ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
        # Tokenizer factory; merges the default special-token kwargs with overrides.
        a_ : List[Any] = self.add_kwargs_tokens_map.copy()
        kwargs.update(SCREAMING_SNAKE_CASE__ )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Any:
        # Feature-extractor factory from the temp dir written in setUp.
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> str:
        # Decoder factory: pulls the n-gram beam-search decoder from the hub.
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
        # Presumably tearDown: drop the temp dir.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
        # save_pretrained/from_pretrained must round-trip tokenizer, feature extractor
        # and decoder (alphabet labels and unigram set compared explicitly).
        a_ : Optional[int] = self.get_tokenizer()
        a_ : Optional[Any] = self.get_feature_extractor()
        a_ : Any = self.get_decoder()
        a_ : List[str] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        processor.save_pretrained(self.tmpdirname )
        a_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE__ )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set ,
            decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
        # from_pretrained must forward LM kwargs (alpha/beta/score_boundary/
        # unk_score_offset) into the loaded language model.
        a_ : Optional[Any] = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        a_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )

    def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
        # Constructing the processor with a tokenizer whose vocab does not match the
        # decoder alphabet must raise (message matched via 'include').
        a_ : List[Any] = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['xx'] )
        with self.assertRaisesRegex(SCREAMING_SNAKE_CASE__ , 'include' ):
            WavaVecaProcessorWithLM(
                tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )

    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
        # Calling the processor on raw audio must match the feature extractor's output.
        a_ : Any = self.get_feature_extractor()
        a_ : Tuple = self.get_tokenizer()
        a_ : Any = self.get_decoder()
        a_ : str = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        a_ : Optional[Any] = floats_list((3, 1_0_0_0) )
        a_ : List[str] = feature_extractor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
        a_ : Optional[int] = processor(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        # Calling the processor on text must match the tokenizer's output.
        a_ : List[Any] = self.get_feature_extractor()
        a_ : Dict = self.get_tokenizer()
        a_ : Optional[Any] = self.get_decoder()
        a_ : Tuple = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        a_ : Dict = 'This is a test string'
        a_ : Tuple = processor(text=SCREAMING_SNAKE_CASE__ )
        a_ : str = tokenizer(SCREAMING_SNAKE_CASE__ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=(2, 1_0, 1_6) , SCREAMING_SNAKE_CASE__ : int=7_7 ) -> Optional[int]:
        # Deterministic dummy logits of the given shape (seeded numpy RNG).
        np.random.seed(SCREAMING_SNAKE_CASE__ )
        return np.random.rand(*SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        # Single-sequence decode must agree with pyctcdecode's top beam
        # (text, logit_score, lm_score).
        a_ : Union[str, Any] = self.get_feature_extractor()
        a_ : Tuple = self.get_tokenizer()
        a_ : Optional[Any] = self.get_decoder()
        a_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        a_ : Tuple = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
        a_ : Union[str, Any] = processor.decode(SCREAMING_SNAKE_CASE__ )
        a_ : int = decoder.decode_beams(SCREAMING_SNAKE_CASE__ )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual('</s> <s> </s>' , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )

    @parameterized.expand([[None], ['fork'], ['spawn']] )
    def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
        # Batched decode must match pyctcdecode's batch API, with and without an
        # explicit multiprocessing pool (fork and spawn contexts).
        a_ : Any = self.get_feature_extractor()
        a_ : Union[str, Any] = self.get_tokenizer()
        a_ : str = self.get_decoder()
        a_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        a_ : str = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            a_ : Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
        else:
            with get_context(SCREAMING_SNAKE_CASE__ ).Pool() as pool:
                a_ : Dict = processor.batch_decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        a_ : List[str] = list(SCREAMING_SNAKE_CASE__ )
        with get_context('fork' ).Pool() as p:
            a_ : List[Any] = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        a_ , a_ , a_ : List[str] = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , decoded_processor.text )
        self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , decoded_processor.logit_score )
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , decoded_processor.lm_score )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        # Beam-search kwargs (beam_width, beam_prune_logp, token_min_logp) must be
        # forwarded to the decoder; scores compared within 1e-3.
        a_ : List[Any] = self.get_feature_extractor()
        a_ : Optional[int] = self.get_tokenizer()
        a_ : Optional[Any] = self.get_decoder()
        a_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        a_ : str = self._get_dummy_logits()
        a_ : List[str] = 1_5
        a_ : Tuple = -20.0
        a_ : Dict = -4.0
        a_ : Optional[int] = processor.batch_decode(
            SCREAMING_SNAKE_CASE__ , beam_width=SCREAMING_SNAKE_CASE__ , beam_prune_logp=SCREAMING_SNAKE_CASE__ , token_min_logp=SCREAMING_SNAKE_CASE__ , )
        a_ : Union[str, Any] = decoded_processor_out.text
        a_ : Any = list(SCREAMING_SNAKE_CASE__ )
        with get_context('fork' ).Pool() as pool:
            a_ : List[str] = decoder.decode_beams_batch(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , beam_width=SCREAMING_SNAKE_CASE__ , beam_prune_logp=SCREAMING_SNAKE_CASE__ , token_min_logp=SCREAMING_SNAKE_CASE__ , )
        a_ : Any = [d[0][0] for d in decoded_decoder_out]
        a_ : List[Any] = [d[0][2] for d in decoded_decoder_out]
        a_ : int = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , SCREAMING_SNAKE_CASE__ )
        self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE__ , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
        self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE__ , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9474] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )

    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
        # LM kwargs (alpha, beta, unk_score_offset, lm_score_boundary) must be forwarded
        # and persisted on the decoder's language model.
        a_ : Any = self.get_feature_extractor()
        a_ : List[Any] = self.get_tokenizer()
        a_ : str = self.get_decoder()
        a_ : int = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        a_ : List[str] = self._get_dummy_logits()
        a_ : int = 2.0
        a_ : Tuple = 5.0
        a_ : List[str] = -20.0
        a_ : Optional[Any] = True
        a_ : int = processor.batch_decode(
            SCREAMING_SNAKE_CASE__ , alpha=SCREAMING_SNAKE_CASE__ , beta=SCREAMING_SNAKE_CASE__ , unk_score_offset=SCREAMING_SNAKE_CASE__ , lm_score_boundary=SCREAMING_SNAKE_CASE__ , )
        a_ : str = decoded_processor_out.text
        a_ : Dict = list(SCREAMING_SNAKE_CASE__ )
        decoder.reset_params(
            alpha=SCREAMING_SNAKE_CASE__ , beta=SCREAMING_SNAKE_CASE__ , unk_score_offset=SCREAMING_SNAKE_CASE__ , lm_score_boundary=SCREAMING_SNAKE_CASE__ , )
        with get_context('fork' ).Pool() as pool:
            a_ : Optional[int] = decoder.decode_beams_batch(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
        a_ : int = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , SCREAMING_SNAKE_CASE__ )
        a_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
        # from_pretrained must download only decoder-relevant files from the hub repo.
        a_ : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        a_ : int = processor.decoder.model_container[processor.decoder._model_key]
        a_ : Optional[Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        a_ : Dict = os.listdir(SCREAMING_SNAKE_CASE__ )
        a_ : Dict = ['alphabet.json', 'language_model']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
        # Loading from a local snapshot must see the same decoder files as the hub cache.
        a_ : List[Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
        a_ : Dict = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
        a_ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
        a_ : List[Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        a_ : List[str] = os.listdir(SCREAMING_SNAKE_CASE__ )
        a_ : Optional[Any] = os.listdir(SCREAMING_SNAKE_CASE__ )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        # AutoProcessor must resolve to the same processor and produce identical
        # features and batch-decoded text.
        a_ : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        a_ : Any = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
        a_ : int = floats_list((3, 1_0_0_0) )
        a_ : Tuple = processor_wavaveca(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
        a_ : List[str] = processor_auto(SCREAMING_SNAKE_CASE__ , return_tensors='np' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
        a_ : Dict = self._get_dummy_logits()
        a_ : Dict = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE__ )
        a_ : List[str] = processor_auto.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )

    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
        # The processor exposes the feature extractor's model_input_names.
        a_ : int = self.get_feature_extractor()
        a_ : Dict = self.get_tokenizer()
        a_ : Tuple = self.get_decoder()
        a_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )

    @staticmethod
    def SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
        # Helper: pull one field (`key`) out of each word-offset dict.
        a_ : Tuple = [d[key] for d in offsets]
        return retrieved_list

    def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
        # Single decode with output_word_offsets: output has 4 keys, words join back to
        # the text, and start/end offsets match the expected frame indices.
        a_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        a_ : str = self._get_dummy_logits()[0]
        a_ : str = processor.decode(SCREAMING_SNAKE_CASE__ , output_word_offsets=SCREAMING_SNAKE_CASE__ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
        self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        # Batched variant of the word-offsets check above.
        a_ : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        a_ : Dict = self._get_dummy_logits()
        a_ : List[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE__ , output_word_offsets=SCREAMING_SNAKE_CASE__ )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
        self.assertListEqual(
            [' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        # End-to-end integration: run a real Wav2Vec2+LM checkpoint on a Common Voice
        # sample, convert word offsets to seconds via inputs_to_logits_ratio, and check
        # both the transcript and per-word start/end times.
        import torch

        a_ : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE__ )
        a_ : Dict = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
        a_ : str = iter(SCREAMING_SNAKE_CASE__ )
        a_ : Optional[Any] = next(SCREAMING_SNAKE_CASE__ )
        a_ : Union[str, Any] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        a_ : Optional[Any] = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        a_ : Optional[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
        with torch.no_grad():
            a_ : Any = model(SCREAMING_SNAKE_CASE__ ).logits.cpu().numpy()
        a_ : List[Any] = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE__ )
        a_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        a_ : int = [
            {
                'start_time': d['start_offset'] * time_offset,
                'end_time': d['end_offset'] * time_offset,
                'word': d['word'],
            }
            for d in output['word_offsets']
        ]
        a_ : Union[str, Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
        # output words
        self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'word' ) ) , SCREAMING_SNAKE_CASE__ )
        self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'word' ) ) , output.text )
        # output times
        a_ : Optional[Any] = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'start_time' ) )
        a_ : Any = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE__ , 'end_time' ) )
        # fmt: off
        a_ : Optional[int] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        a_ : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=0.01 ) )
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=0.01 ) )
570
1
'''simple docstring''' import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin _lowercase : Dict = logging.get_logger(__name__) enable_full_determinism() class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): __magic_name__ : Union[str, Any] = UNetaDModel __magic_name__ : Union[str, Any] = "sample" @property def a__( self : Dict )-> Any: """simple docstring""" UpperCAmelCase = 4 UpperCAmelCase = 3 UpperCAmelCase = (32, 32) UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) UpperCAmelCase = torch.tensor([10] ).to(lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def a__( self : Optional[Any] )-> Optional[int]: """simple docstring""" return (3, 32, 32) @property def a__( self : int )-> str: """simple docstring""" return (3, 32, 32) def a__( self : Optional[Any] )-> List[Any]: """simple docstring""" UpperCAmelCase = { '''block_out_channels''': (32, 64), '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''), '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''), '''attention_head_dim''': 3, '''out_channels''': 3, '''in_channels''': 3, '''layers_per_block''': 2, '''sample_size''': 32, } UpperCAmelCase = self.dummy_input return init_dict, inputs_dict class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): __magic_name__ : Any = UNetaDModel __magic_name__ : str = "sample" @property def a__( self : Tuple )-> Optional[Any]: """simple docstring""" UpperCAmelCase = 4 UpperCAmelCase = 4 UpperCAmelCase = (32, 32) UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) UpperCAmelCase = torch.tensor([10] ).to(lowerCAmelCase ) return {"sample": noise, "timestep": time_step} 
@property def a__( self : List[Any] )-> List[Any]: """simple docstring""" return (4, 32, 32) @property def a__( self : Union[str, Any] )-> List[str]: """simple docstring""" return (4, 32, 32) def a__( self : Optional[int] )-> Optional[int]: """simple docstring""" UpperCAmelCase = { '''sample_size''': 32, '''in_channels''': 4, '''out_channels''': 4, '''layers_per_block''': 2, '''block_out_channels''': (32, 64), '''attention_head_dim''': 32, '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''), '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''), } UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def a__( self : List[str] )-> Optional[int]: """simple docstring""" UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(lowerCAmelCase ) UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def a__( self : int )-> List[str]: """simple docstring""" UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase ) model.to(lowerCAmelCase ) UpperCAmelCase = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' ) def a__( self : Tuple )-> int: """simple docstring""" UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase ) model_accelerate.to(lowerCAmelCase ) model_accelerate.eval() UpperCAmelCase = torch.randn( 1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , 
generator=torch.manual_seed(0 ) , ) UpperCAmelCase = noise.to(lowerCAmelCase ) UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(lowerCAmelCase ) UpperCAmelCase = model_accelerate(lowerCAmelCase , lowerCAmelCase )['''sample'''] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained( '''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase , low_cpu_mem_usage=lowerCAmelCase ) model_normal_load.to(lowerCAmelCase ) model_normal_load.eval() UpperCAmelCase = model_normal_load(lowerCAmelCase , lowerCAmelCase )['''sample'''] assert torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 ) def a__( self : int )-> Any: """simple docstring""" UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ) model.eval() model.to(lowerCAmelCase ) UpperCAmelCase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) UpperCAmelCase = noise.to(lowerCAmelCase ) UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(lowerCAmelCase ) with torch.no_grad(): UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase ).sample UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCAmelCase = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] ) # fmt: on self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 ) ) class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): __magic_name__ : int = UNetaDModel __magic_name__ : List[Any] = "sample" @property def a__( self : Any , lowerCAmelCase : Union[str, Any]=(32, 32) )-> Optional[Any]: """simple docstring""" UpperCAmelCase = 4 UpperCAmelCase = 3 UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) UpperCAmelCase = torch.tensor(batch_size * [10] 
).to(dtype=torch.intaa , device=lowerCAmelCase ) return {"sample": noise, "timestep": time_step} @property def a__( self : List[str] )-> Union[str, Any]: """simple docstring""" return (3, 32, 32) @property def a__( self : Optional[Any] )-> Optional[int]: """simple docstring""" return (3, 32, 32) def a__( self : Dict )-> List[Any]: """simple docstring""" UpperCAmelCase = { '''block_out_channels''': [32, 64, 64, 64], '''in_channels''': 3, '''layers_per_block''': 1, '''out_channels''': 3, '''time_embedding_type''': '''fourier''', '''norm_eps''': 1E-6, '''mid_block_scale_factor''': math.sqrt(2.0 ), '''norm_num_groups''': None, '''down_block_types''': [ '''SkipDownBlock2D''', '''AttnSkipDownBlock2D''', '''SkipDownBlock2D''', '''SkipDownBlock2D''', ], '''up_block_types''': [ '''SkipUpBlock2D''', '''SkipUpBlock2D''', '''AttnSkipUpBlock2D''', '''SkipUpBlock2D''', ], } UpperCAmelCase = self.dummy_input return init_dict, inputs_dict @slow def a__( self : Any )-> str: """simple docstring""" UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(lowerCAmelCase ) UpperCAmelCase = self.dummy_input UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(lowerCAmelCase ) UpperCAmelCase = noise UpperCAmelCase = model(**lowerCAmelCase ) assert image is not None, "Make sure output is not None" @slow def a__( self : Optional[Any] )-> int: """simple docstring""" UpperCAmelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ) model.to(lowerCAmelCase ) UpperCAmelCase = 4 UpperCAmelCase = 3 UpperCAmelCase = (256, 256) UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) UpperCAmelCase = torch.tensor(batch_size * [1E-4] ).to(lowerCAmelCase ) with torch.no_grad(): UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase ).sample UpperCAmelCase = output[0, -3:, 
-3:, -1].flatten().cpu() # fmt: off UpperCAmelCase = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] ) # fmt: on self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) def a__( self : List[str] )-> List[Any]: """simple docstring""" UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' ) model.to(lowerCAmelCase ) UpperCAmelCase = 4 UpperCAmelCase = 3 UpperCAmelCase = (32, 32) UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) UpperCAmelCase = torch.tensor(batch_size * [1E-4] ).to(lowerCAmelCase ) with torch.no_grad(): UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase ).sample UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCAmelCase = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] ) # fmt: on self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) ) def a__( self : Union[str, Any] )-> Optional[int]: """simple docstring""" pass
705
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ): '''simple docstring''' if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ): '''simple docstring''' UpperCAmelCase = [] for old_item in old_list: UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' ) UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' ) UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' ) UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' ) UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowerCamelCase__ ( A : Any , A : int=0 ): '''simple docstring''' UpperCAmelCase = [] for old_item in old_list: UpperCAmelCase = old_item UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ): '''simple docstring''' assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): UpperCAmelCase = old_checkpoint[path] UpperCAmelCase = old_tensor.shape[0] // 3 UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3 UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 ) UpperCAmelCase = query.reshape(A ) UpperCAmelCase = key.reshape(A ) UpperCAmelCase = value.reshape(A ) for path in paths: UpperCAmelCase = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0] else: UpperCAmelCase = old_checkpoint[path['''old''']] def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ): '''simple docstring''' UpperCAmelCase = {} UpperCAmelCase = checkpoint['''time_embed.0.weight'''] UpperCAmelCase = checkpoint['''time_embed.0.bias'''] UpperCAmelCase = checkpoint['''time_embed.2.weight'''] UpperCAmelCase = checkpoint['''time_embed.2.bias'''] UpperCAmelCase = checkpoint['''input_blocks.0.0.weight'''] UpperCAmelCase = checkpoint['''input_blocks.0.0.bias'''] UpperCAmelCase = checkpoint['''out.0.weight'''] UpperCAmelCase = 
checkpoint['''out.0.bias'''] UpperCAmelCase = checkpoint['''out.2.weight'''] UpperCAmelCase = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) UpperCAmelCase = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(A ) } # Retrieves the keys for the middle blocks only UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) UpperCAmelCase = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(A ) } # Retrieves the keys for the output blocks only UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) UpperCAmelCase = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(A ) } for i in range(1 , A ): UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1) UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1) UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: UpperCAmelCase = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] UpperCAmelCase = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue UpperCAmelCase = renew_resnet_paths(A ) UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( A , A , A , additional_replacements=[meta_path, resnet_op] , config=A ) if len(A ): UpperCAmelCase = renew_attention_paths(A ) UpperCAmelCase = { '''old''': f"""input_blocks.{i}.1""", '''new''': 
f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } UpperCAmelCase = { f"""input_blocks.{i}.1.qkv.bias""": { '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , ) UpperCAmelCase = middle_blocks[0] UpperCAmelCase = middle_blocks[1] UpperCAmelCase = middle_blocks[2] UpperCAmelCase = renew_resnet_paths(A ) assign_to_checkpoint(A , A , A , config=A ) UpperCAmelCase = renew_resnet_paths(A ) assign_to_checkpoint(A , A , A , config=A ) UpperCAmelCase = renew_attention_paths(A ) UpperCAmelCase = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( A , A , A , attention_paths_to_split=A , config=A ) for i in range(A ): UpperCAmelCase = i // (config['''num_res_blocks'''] + 1) UpperCAmelCase = i % (config['''num_res_blocks'''] + 1) UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]] UpperCAmelCase = {} for layer in output_block_layers: UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 ) if layer_id in output_block_list: 
output_block_list[layer_id].append(A ) else: UpperCAmelCase = [layer_name] if len(A ) > 1: UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] UpperCAmelCase = renew_resnet_paths(A ) UpperCAmelCase = renew_resnet_paths(A ) UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A ) if ["conv.weight", "conv.bias"] in output_block_list.values(): UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) UpperCAmelCase = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] UpperCAmelCase = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. if len(A ) == 2: UpperCAmelCase = [] if len(A ): UpperCAmelCase = renew_attention_paths(A ) UpperCAmelCase = { '''old''': f"""output_blocks.{i}.1""", '''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } UpperCAmelCase = { f"""output_blocks.{i}.1.qkv.bias""": { '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , ) else: UpperCAmelCase = renew_resnet_paths(A , 
n_shave_prefix_segments=1 ) for path in resnet_0_paths: UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] ) UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] ) UpperCAmelCase = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") _lowercase : Dict = parser.parse_args() _lowercase : List[Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: _lowercase : List[str] = json.loads(f.read()) _lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] _lowercase : Any = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: _lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1])) _lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1])) _lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
50
0
"""simple docstring""" from __future__ import annotations class lowerCamelCase__ : def __init__( self , SCREAMING_SNAKE_CASE ): """simple docstring""" snake_case : Optional[int] = data snake_case : Node | None = None snake_case : Node | None = None def UpperCamelCase__ ( lowercase__ : Dict ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def UpperCamelCase__ ( lowercase__ : List[Any] ): return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def UpperCamelCase__ ( lowercase__ : str ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def UpperCamelCase__ ( ): # Main function for testing. snake_case : int = Node(1 ) snake_case : Optional[int] = Node(2 ) snake_case : List[str] = Node(3 ) snake_case : Optional[int] = Node(4 ) snake_case : int = Node(5 ) snake_case : Any = Node(6 ) snake_case : Optional[Any] = Node(7 ) snake_case : int = Node(8 ) snake_case : Any = Node(9 ) print(is_full_binary_tree(_a ) ) print(depth_of_tree(_a ) ) print("Tree is: " ) display(_a ) if __name__ == "__main__": main()
134
import numpy as np def UpperCamelCase ( _a , _a , _a , _a , _a ) -> List[Any]: '''simple docstring''' lowercase_ :Optional[Any] = int(np.ceil((x_end - xa) / h ) ) lowercase_ :List[str] = np.zeros((n + 1,) ) lowercase_ :List[str] = ya lowercase_ :Any = xa for k in range(_a ): lowercase_ :int = f(_a , y[k] ) lowercase_ :List[str] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowercase_ :Optional[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowercase_ :Tuple = f(x + h , y[k] + h * ka ) lowercase_ :Dict = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
257
0
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict lowerCamelCase__ = namedtuple( '''_TestCommandArgs''', [ '''dataset''', '''name''', '''cache_dir''', '''data_dir''', '''all_configs''', '''save_infos''', '''ignore_verifications''', '''force_redownload''', '''clear_cache''', ], defaults=[None, None, None, False, False, False, False, False], ) def A(__a: List[str] , __a: List[Any] ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def A(__a: Tuple ): lowerCAmelCase_ = _TestCommandArgs(dataset=lowerCamelCase_ , all_configs=lowerCamelCase_ , save_infos=lowerCamelCase_ ) lowerCAmelCase_ = TestCommand(*lowerCamelCase_ ) test_command.run() lowerCAmelCase_ = os.path.join(lowerCamelCase_ , "README.md" ) assert os.path.exists(lowerCamelCase_ ) lowerCAmelCase_ = DatasetInfosDict.from_directory(lowerCamelCase_ ) lowerCAmelCase_ = DatasetInfosDict( { "default": DatasetInfo( features=Features( { "tokens": Sequence(Value("string" ) ), "ner_tags": Sequence( ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ), "langs": Sequence(Value("string" ) ), "spans": Sequence(Value("string" ) ), } ) , splits=[ { "name": "train", "num_bytes": 235_1563, "num_examples": 1_0000, }, { "name": "validation", "num_bytes": 23_8418, "num_examples": 1000, }, ] , download_size=394_0680 , dataset_size=258_9981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCAmelCase_ = getattr(dataset_infos["default"] , lowerCamelCase_ ), getattr(expected_dataset_infos["default"] , lowerCamelCase_ ) if key == "num_bytes": assert is_apercent_close(lowerCamelCase_ , lowerCamelCase_ ) elif key == "splits": assert list(lowerCamelCase_ ) == list(lowerCamelCase_ ) for split in result: assert result[split].name == expected[split].name 
assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
702
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right lowerCamelCase__ = 5_00_03 lowerCamelCase__ = 5_00_02 @require_sentencepiece @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = PLBartTokenizer lowerCamelCase__ = None lowerCamelCase__ = False def __a ( self ) -> Optional[int]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ = PLBartTokenizer(_a , language_codes="base" , keep_accents=_a ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> Any: lowerCAmelCase_ = PLBartTokenizer(_a , language_codes="base" , keep_accents=_a ) lowerCAmelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a ) self.assertListEqual( _a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) lowerCAmelCase_ = tokenizer.vocab_size lowerCAmelCase_ = [tokenizer.convert_ids_to_tokens(_a ) for x in range(end - 4 , _a )] self.assertListEqual(_a , ["__java__", "__python__", "__en_XX__", "<mask>"] ) lowerCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" lowerCAmelCase_ = tokenizer(_a ).input_ids self.assertEqual( tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = PLBartTokenizer(_a , language_codes="multi" , keep_accents=_a ) lowerCAmelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a ) self.assertListEqual( _a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) lowerCAmelCase_ = tokenizer.vocab_size lowerCAmelCase_ = [tokenizer.convert_ids_to_tokens(_a ) for x in range(end - 7 , _a )] self.assertListEqual( _a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) lowerCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" lowerCAmelCase_ = tokenizer(_a ).input_ids self.assertEqual( tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , ) @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ (unittest.TestCase ): lowerCamelCase__ = '''uclanlp/plbart-python-en_XX''' lowerCamelCase__ = [ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] lowerCamelCase__ = [ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] lowerCamelCase__ = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] 
@classmethod def __a ( cls ) -> str: lowerCAmelCase_ = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" ) lowerCAmelCase_ = 1 return cls def __a ( self ) -> Optional[int]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 ) def __a ( self ) -> Any: lowerCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _a ) def __a ( self ) -> int: self.assertIn(_a , self.tokenizer.all_special_ids ) lowerCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] lowerCAmelCase_ = self.tokenizer.decode(_a , skip_special_tokens=_a ) lowerCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a ) self.assertEqual(_a , _a ) self.assertNotIn(self.tokenizer.eos_token , _a ) def __a ( self ) -> str: lowerCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0] , _a ) lowerCAmelCase_ = 10 lowerCAmelCase_ = self.tokenizer(_a , max_length=_a , truncation=_a ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , _a ) self.assertEqual(len(_a ) , _a ) def __a ( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] ) def __a ( self ) -> str: lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_a ) lowerCAmelCase_ = PLBartTokenizer.from_pretrained(_a ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _a ) @require_torch def __a ( self ) -> List[str]: lowerCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors="pt" ) lowerCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq 
batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , _a ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def __a ( self ) -> int: lowerCAmelCase_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=_a , truncation=_a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) lowerCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(_a , _a ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) lowerCAmelCase_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , _a ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.tokenizer(self.src_text , padding=_a , truncation=_a , max_length=3 , return_tensors="pt" ) lowerCAmelCase_ = self.tokenizer( text_target=self.tgt_text , padding=_a , truncation=_a , max_length=10 , return_tensors="pt" ) lowerCAmelCase_ = targets["input_ids"] lowerCAmelCase_ = shift_tokens_right(_a , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" ) self.assertEqual( nested_simplify(_a ) , { # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 50003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50001, } , )
226
0
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __snake_case( self ): _UpperCAmelCase : Optional[int] = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(A_ ) ) def __snake_case( self ): _UpperCAmelCase : str = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(A_ ) ) def __snake_case( self ): _UpperCAmelCase : Optional[int] = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(A_ ) ) def __snake_case( self ): _UpperCAmelCase : Dict = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(A_ ) ) def __snake_case( self ): _UpperCAmelCase : Optional[Any] = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(A_ ) ) def __snake_case( self ): _UpperCAmelCase : List[str] = [ """safety_checker/pytorch_model.fp16.bin""", 
"""safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _UpperCAmelCase : Union[str, Any] = """fp16""" self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) ) def __snake_case( self ): _UpperCAmelCase : Dict = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _UpperCAmelCase : int = """fp16""" self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) ) def __snake_case( self ): # pass variant but use the non-variant filenames _UpperCAmelCase : int = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] _UpperCAmelCase : int = """fp16""" self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) ) def __snake_case( self ): _UpperCAmelCase : int = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] _UpperCAmelCase : int = """fp16""" self.assertFalse(is_safetensors_compatible(A_ , variant=A_ ) ) def __snake_case( self ): _UpperCAmelCase : Optional[int] = [ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] _UpperCAmelCase : Any = """fp16""" self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) ) def __snake_case( self ): # pass variant but use the non-variant filenames _UpperCAmelCase : Union[str, Any] = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] _UpperCAmelCase : Tuple = """fp16""" 
self.assertTrue(is_safetensors_compatible(A_ , variant=A_ ) ) def __snake_case( self ): _UpperCAmelCase : List[str] = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] _UpperCAmelCase : List[Any] = """fp16""" self.assertFalse(is_safetensors_compatible(A_ , variant=A_ ) )
643
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Image from .base import TaskTemplate @dataclass(frozen=A ) class _SCREAMING_SNAKE_CASE ( A ): __SCREAMING_SNAKE_CASE = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) __SCREAMING_SNAKE_CASE = Features({'''image''': Image()} ) __SCREAMING_SNAKE_CASE = Features({'''labels''': ClassLabel} ) __SCREAMING_SNAKE_CASE = "image" __SCREAMING_SNAKE_CASE = "labels" def __snake_case( self , A_ ): if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , A_ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) _UpperCAmelCase : Tuple = copy.deepcopy(self ) _UpperCAmelCase : str = self.label_schema.copy() _UpperCAmelCase : Optional[Any] = features[self.label_column] _UpperCAmelCase : int = label_schema return task_template @property def __snake_case( self ): return { self.image_column: "image", self.label_column: "labels", }
643
1
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class snake_case_ (lowercase__ ): """simple docstring""" def A_ ( self): """simple docstring""" UpperCAmelCase_ : int = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(_UpperCAmelCase ,"tf_padding")) self.parent.assertTrue(hasattr(_UpperCAmelCase ,"depth_multiplier")) class snake_case_ : """simple docstring""" def __init__( self ,lowercase ,lowercase=13 ,lowercase=3 ,lowercase=32 ,lowercase=0.25 ,lowercase=8 ,lowercase=True ,lowercase=1024 ,lowercase=32 ,lowercase="relu6" ,lowercase=0.1 ,lowercase=0.02 ,lowercase=True ,lowercase=True ,lowercase=10 ,lowercase=None ,): """simple docstring""" UpperCAmelCase_ : int = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[int] = image_size UpperCAmelCase_ : Optional[int] = depth_multiplier UpperCAmelCase_ : Any = min_depth UpperCAmelCase_ : List[str] = tf_padding UpperCAmelCase_ : int = int(last_hidden_size * depth_multiplier) UpperCAmelCase_ : Tuple = output_stride UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[int] = classifier_dropout_prob UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Union[str, Any] = 
num_labels UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : Any = scope def A_ ( self): """simple docstring""" UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) UpperCAmelCase_ : str = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_labels) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels) UpperCAmelCase_ : Union[str, Any] = self.get_config() return config, pixel_values, labels, pixel_labels def A_ ( self): """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,) def A_ ( self ,lowercase ,lowercase ,lowercase ,lowercase): """simple docstring""" UpperCAmelCase_ : Optional[int] = MobileNetVaModel(config=_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() UpperCAmelCase_ : Tuple = model(_UpperCAmelCase) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def A_ ( self ,lowercase ,lowercase ,lowercase ,lowercase): """simple docstring""" UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : int = MobileNetVaForImageClassification(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() UpperCAmelCase_ : Optional[Any] = model(_UpperCAmelCase ,labels=_UpperCAmelCase) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels)) def A_ ( self): """simple docstring""" UpperCAmelCase_ : int = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = config_and_inputs 
UpperCAmelCase_ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case_ (lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" _lowerCamelCase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () _lowerCamelCase = ( {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification} if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def A_ ( self): """simple docstring""" UpperCAmelCase_ : Tuple = MobileNetVaModelTester(self) UpperCAmelCase_ : Union[str, Any] = MobileNetVaConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase) def A_ ( self): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds") def A_ ( self): """simple docstring""" pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings") def A_ ( self): """simple docstring""" pass @unittest.skip(reason="MobileNetV1 does not output attentions") def A_ ( self): """simple docstring""" pass def A_ ( self): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[str] = model_class(_UpperCAmelCase) UpperCAmelCase_ : int = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] ,_UpperCAmelCase) def A_ ( self): """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def A_ ( self): """simple docstring""" def 
check_hidden_states_output(lowercase ,lowercase ,lowercase): UpperCAmelCase_ : Optional[int] = model_class(_UpperCAmelCase) model.to(_UpperCAmelCase) model.eval() with torch.no_grad(): UpperCAmelCase_ : Any = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase)) UpperCAmelCase_ : List[Any] = outputs.hidden_states UpperCAmelCase_ : Tuple = 26 self.assertEqual(len(_UpperCAmelCase) ,_UpperCAmelCase) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : int = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Any = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase) def A_ ( self): """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase) @slow def A_ ( self): """simple docstring""" for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Union[str, Any] = MobileNetVaModel.from_pretrained(_UpperCAmelCase) self.assertIsNotNone(_UpperCAmelCase) def _snake_case ( ) -> str: '''simple docstring''' UpperCAmelCase_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class snake_case_ (unittest.TestCase ): """simple docstring""" @cached_property def A_ ( self): """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None ) @slow def A_ ( self): """simple docstring""" UpperCAmelCase_ : List[Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(_UpperCAmelCase) UpperCAmelCase_ : str = self.default_image_processor UpperCAmelCase_ : List[Any] = prepare_img() 
UpperCAmelCase_ : Any = image_processor(images=_UpperCAmelCase ,return_tensors="pt").to(_UpperCAmelCase) # forward pass with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**_UpperCAmelCase) # verify the logits UpperCAmelCase_ : Optional[int] = torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape ,_UpperCAmelCase) UpperCAmelCase_ : int = torch.tensor([-4.1739, -1.1233, 3.1205]).to(_UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4))
714
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { '''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class snake_case_ (lowercase__ ): """simple docstring""" _lowerCamelCase = """umt5""" _lowerCamelCase = ["""past_key_values"""] def __init__( self ,lowercase=250112 ,lowercase=512 ,lowercase=64 ,lowercase=1024 ,lowercase=8 ,lowercase=None ,lowercase=6 ,lowercase=32 ,lowercase=128 ,lowercase=0.1 ,lowercase=1E-6 ,lowercase=1.0 ,lowercase="gated-gelu" ,lowercase=True ,lowercase=True ,lowercase="T5Tokenizer" ,lowercase=True ,lowercase=0 ,lowercase=1 ,lowercase=0 ,**lowercase ,): """simple docstring""" super().__init__( is_encoder_decoder=lowercase ,tokenizer_class=lowercase ,tie_word_embeddings=lowercase ,pad_token_id=lowercase ,eos_token_id=lowercase ,decoder_start_token_id=lowercase ,**lowercase ,) UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = d_model UpperCAmelCase_ : Any = d_kv UpperCAmelCase_ : int = d_ff UpperCAmelCase_ : Tuple = num_layers UpperCAmelCase_ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[int] = num_heads UpperCAmelCase_ : str = relative_attention_num_buckets UpperCAmelCase_ : Any = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Union[str, Any] = layer_norm_epsilon UpperCAmelCase_ : Optional[Any] = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : List[str] = self.feed_forward_proj.split("-") UpperCAmelCase_ : Any = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowercase) > 1 and act_info[0] != "gated" or 
len(lowercase) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'") if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : Tuple = "gelu_new" @property def A_ ( self): """simple docstring""" return self.d_model @property def A_ ( self): """simple docstring""" return self.num_heads @property def A_ ( self): """simple docstring""" return self.num_layers class snake_case_ (lowercase__ ): """simple docstring""" @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def A_ ( self): """simple docstring""" UpperCAmelCase_ : int = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : Union[str, Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Optional[int] = {0: "batch"} UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : Optional[int] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Dict = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowercase ,direction="inputs") return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def A_ ( self): """simple docstring""" return 13 @property def A_ ( self): """simple docstring""" return 5E-4
455
0
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( A__ ): def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> Tuple: '''simple docstring''' UpperCAmelCase = dataset UpperCAmelCase = process UpperCAmelCase = params def __len__( self : List[Any] ) -> str: '''simple docstring''' return len(self.dataset ) def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Tuple ) -> Dict: '''simple docstring''' UpperCAmelCase = self.dataset[i] UpperCAmelCase = self.process(UpperCamelCase__ , **self.params ) return processed class __magic_name__ ( A__ ): def __init__( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict=None ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = loader UpperCAmelCase = infer UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether UpperCAmelCase = None UpperCAmelCase = loader_batch_size # Internal bookkeeping UpperCAmelCase = None UpperCAmelCase = None def __len__( self : List[Any] ) -> Any: '''simple docstring''' return len(self.loader ) def __iter__( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' UpperCAmelCase = iter(self.loader ) return self def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Tuple: '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Convert ModelOutput to tuple first UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): UpperCAmelCase = 
tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 UpperCAmelCase = self._loader_batch_data.__class__(UpperCamelCase__ ) self._loader_batch_index += 1 return result def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch UpperCAmelCase = next(self.iterator ) UpperCAmelCase = self.infer(UpperCamelCase__ , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCamelCase__ , torch.Tensor ): UpperCAmelCase = processed else: UpperCAmelCase = list(processed.keys() )[0] UpperCAmelCase = processed[key] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = len(UpperCamelCase__ ) else: UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch UpperCAmelCase = processed UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( A__ ): def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple=None ) -> Any: '''simple docstring''' super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __iter__( self : Tuple ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase = iter(self.loader ) UpperCAmelCase = None return self def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]: '''simple docstring''' if self.subiterator is None: UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) UpperCAmelCase = next(self.subiterator ) return processed class __magic_name__ ( A__ ): def __iter__( self : Dict ) -> Any: '''simple docstring''' UpperCAmelCase = iter(self.loader ) return self def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' UpperCAmelCase = False UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase = self.loader_batch_item() UpperCAmelCase = item.pop("is_last" ) accumulator.append(UpperCamelCase__ ) if is_last: return accumulator while not is_last: UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(UpperCamelCase__ , torch.Tensor ): UpperCAmelCase = processed else: UpperCAmelCase = list(processed.keys() )[0] UpperCAmelCase = processed[key] if isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase = len(UpperCamelCase__ ) else: UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
UpperCAmelCase = observed_batch_size UpperCAmelCase = processed UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase = self.loader_batch_item() UpperCAmelCase = item.pop("is_last" ) accumulator.append(UpperCamelCase__ ) if is_last: return accumulator else: UpperCAmelCase = processed UpperCAmelCase = item.pop("is_last" ) accumulator.append(UpperCamelCase__ ) return accumulator class __magic_name__ ( A__ ): def __init__( self : Dict , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ) -> Any: '''simple docstring''' UpperCAmelCase = dataset UpperCAmelCase = key def __len__( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' return len(self.dataset ) def __getitem__( self : Tuple , UpperCamelCase__ : List[Any] ) -> Dict: '''simple docstring''' return self.dataset[i][self.key] class __magic_name__ ( A__ ): def __init__( self : Optional[int] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase = dataset UpperCAmelCase = keya UpperCAmelCase = keya def __len__( self : Union[str, Any] ) -> Any: '''simple docstring''' return len(self.dataset ) def __getitem__( self : int , UpperCamelCase__ : Optional[Any] ) -> Dict: '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
323
import math def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ ) -> float: if ( not isinstance(lowerCamelCase_ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * power_factor def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ ) -> float: if ( not isinstance(lowerCamelCase_ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError("power_factor must be a valid float value between -1 and 1." ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
323
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class _A ( unittest.TestCase ): def A__ ( self ): """simple docstring""" lowercase = tempfile.mkdtemp() lowercase = BlipImageProcessor() lowercase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) lowercase = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) lowercase = InstructBlipProcessor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def A__ ( self , **__lowerCAmelCase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer def A__ ( self , **__lowerCAmelCase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor def A__ ( self , **__lowerCAmelCase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).qformer_tokenizer def A__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def A__ ( self ): """simple docstring""" lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def A__ ( self ): """simple docstring""" lowercase = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowercase = 
self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) lowercase = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor.qformer_tokenizer , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = self.get_qformer_tokenizer() lowercase = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowercase = self.prepare_image_inputs() lowercase = image_processor(__lowerCAmelCase , return_tensors="""np""" ) lowercase = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = self.get_qformer_tokenizer() lowercase = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowercase = """lower newer""" lowercase = processor(text=__lowerCAmelCase ) lowercase = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) lowercase = qformer_tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , 
encoded_processor["""qformer_""" + key] ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = self.get_qformer_tokenizer() lowercase = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowercase = """lower newer""" lowercase = self.prepare_image_inputs() lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = self.get_qformer_tokenizer() lowercase = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase = processor.batch_decode(__lowerCAmelCase ) lowercase = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" lowercase = self.get_image_processor() lowercase = self.get_tokenizer() lowercase = self.get_qformer_tokenizer() lowercase = InstructBlipProcessor( tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase ) lowercase = """lower newer""" lowercase = self.prepare_image_inputs() lowercase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual( list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
197
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Optional[int]: '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str: '''simple docstring''' lowercase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowercase = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" ) lowercase = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" ) lowercase = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" ) lowercase = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" ) lowercase = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" ) lowercase = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" ) lowercase = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" ) lowercase = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" ) lowercase = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" ) lowercase = key.replace("""image_encoder.module""" , """flava.image_model""" ) lowercase = key.replace("""text_encoder.module""" , """flava.text_model""" ) lowercase = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" ) lowercase = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" ) lowercase = key.replace("""text_projection""" , """flava.text_projection""" ) lowercase = key.replace("""image_projection""" , """flava.image_projection""" ) lowercase = value.float() for key, value in 
codebook_state_dict.items(): lowercase = value return upgrade @torch.no_grad() def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str=None ) -> int: '''simple docstring''' if config_path is not None: lowercase = FlavaConfig.from_pretrained(lowerCAmelCase__ ) else: lowercase = FlavaConfig() lowercase = FlavaForPreTraining(lowerCAmelCase__ ).eval() lowercase = convert_dalle_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , save_checkpoint=lowerCAmelCase__ ) if os.path.exists(lowerCAmelCase__ ): lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" ) else: lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" ) lowercase = upgrade_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) hf_model.load_state_dict(lowerCAmelCase__ ) lowercase = hf_model.state_dict() lowercase = count_parameters(lowerCAmelCase__ ) lowercase = count_parameters(lowerCAmelCase__ ) + count_parameters(lowerCAmelCase__ ) assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) hf_model.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Tuple =argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") __lowerCAmelCase : List[str] =parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
197
1
# NOTE(review): machine-mangled BigBird configuration module. Both classes are
# named ``snake_case__`` (the second shadows the first), ``__init__`` repeats
# the parameter name ``a__`` (SyntaxError as written), and every
# ``__snake_case : ... = value`` line was presumably ``self.<attr> = value``.
# Code tokens are preserved unchanged; only comments/docstrings are added.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowerCamelCase__ = logging.get_logger(__name__)

lowerCamelCase__ = {
    '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
    '''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
    '''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class snake_case__ ( lowercase_):
    '''Configuration class for BigBird models (model_type "big_bird").'''

    lowerCamelCase : Dict = "big_bird"

    def __init__( self , a__=5_03_58 , a__=7_68 , a__=12 , a__=12 , a__=30_72 , a__="gelu_new" , a__=0.1 , a__=0.1 , a__=40_96 , a__=2 , a__=0.02 , a__=1e-12 , a__=True , a__=0 , a__=1 , a__=2 , a__=66 , a__="block_sparse" , a__=True , a__=False , a__=64 , a__=3 , a__=None , **a__ , ) -> Dict:
        '''Store BigBird hyperparameters; special-token ids go to the base class.'''
        # presumably pad/bos/eos/sep token ids -- TODO confirm against upstream
        super().__init__(
            pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , sep_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
        __snake_case :List[Any] = vocab_size
        __snake_case :str = max_position_embeddings
        __snake_case :str = hidden_size
        __snake_case :Union[str, Any] = num_hidden_layers
        __snake_case :str = num_attention_heads
        __snake_case :Tuple = intermediate_size
        __snake_case :List[Any] = hidden_act
        __snake_case :Union[str, Any] = hidden_dropout_prob
        __snake_case :Optional[Any] = attention_probs_dropout_prob
        __snake_case :Tuple = initializer_range
        __snake_case :Any = type_vocab_size
        __snake_case :Optional[Any] = layer_norm_eps
        __snake_case :List[Any] = use_cache
        __snake_case :Tuple = rescale_embeddings
        # attention_type selects "block_sparse" vs full attention
        __snake_case :int = attention_type
        __snake_case :Dict = use_bias
        __snake_case :Any = block_size
        __snake_case :int = num_random_blocks
        __snake_case :List[str] = classifier_dropout


class snake_case__ ( lowercase_):
    '''ONNX export configuration: declares the dynamic input axes.'''

    @property
    def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
        '''Return the input-name -> dynamic-axis mapping for ONNX export.'''
        if self.task == "multiple-choice":
            # multiple-choice adds a "choice" axis between batch and sequence
            __snake_case :Union[str, Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            __snake_case :Dict = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
455
"""simple docstring""" def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if edge <= 0 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError("Length must be a positive." ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
682
0
# NOTE(review): machine-mangled NLLB-MoE configuration module. ``__init__``
# repeats the parameter name ``__lowerCAmelCase`` for every argument
# (SyntaxError as written), and each ``UpperCamelCase__ = value`` line was
# presumably ``self.<attr> = value``. Code tokens are preserved unchanged.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class __SCREAMING_SNAKE_CASE ( _a ):
    '''Configuration for NLLB-MoE (mixture-of-experts seq2seq) models.'''

    snake_case : str = """nllb-moe"""
    snake_case : str = ["""past_key_values"""]
    snake_case : Tuple = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , __lowerCAmelCase=128112 , __lowerCAmelCase=1024 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=12 , __lowerCAmelCase=4096 , __lowerCAmelCase=16 , __lowerCAmelCase=0.05 , __lowerCAmelCase=0.05 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=1024 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=2 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="float32" , __lowerCAmelCase=False , __lowerCAmelCase=128 , __lowerCAmelCase=64 , __lowerCAmelCase=4 , __lowerCAmelCase=4 , __lowerCAmelCase=0.001 , __lowerCAmelCase=0.001 , __lowerCAmelCase="all" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=1.0 , __lowerCAmelCase=0.2 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
        '''Store model and Mixture-of-Experts routing hyperparameters.'''
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = d_model
        UpperCamelCase__ = encoder_ffn_dim
        UpperCamelCase__ = encoder_layers
        UpperCamelCase__ = encoder_attention_heads
        UpperCamelCase__ = decoder_ffn_dim
        UpperCamelCase__ = decoder_layers
        UpperCamelCase__ = decoder_attention_heads
        UpperCamelCase__ = dropout
        UpperCamelCase__ = attention_dropout
        UpperCamelCase__ = activation_dropout
        UpperCamelCase__ = activation_function
        UpperCamelCase__ = init_std
        UpperCamelCase__ = encoder_layerdrop
        UpperCamelCase__ = decoder_layerdrop
        UpperCamelCase__ = use_cache
        # presumably num_hidden_layers mirrors encoder_layers -- TODO confirm
        UpperCamelCase__ = encoder_layers
        UpperCamelCase__ = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCamelCase__ = router_z_loss_coef
        UpperCamelCase__ = router_aux_loss_coef
        UpperCamelCase__ = decoder_sparse_step
        UpperCamelCase__ = encoder_sparse_step
        UpperCamelCase__ = num_experts
        UpperCamelCase__ = expert_capacity
        UpperCamelCase__ = router_bias
        # validate the router computation dtype before storing it
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        UpperCamelCase__ = router_dtype
        UpperCamelCase__ = router_ignore_padding_tokens
        UpperCamelCase__ = batch_prioritized_routing
        UpperCamelCase__ = second_expert_policy
        UpperCamelCase__ = normalize_router_prob_before_dropping
        UpperCamelCase__ = moe_eval_capacity_token_fraction
        UpperCamelCase__ = moe_token_dropout
        UpperCamelCase__ = output_router_logits
        super().__init__(
            pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
720
"""Lazy-import init for the Pix2Struct model family.

Fixes two defects in the original:
* the import map and the optional submodule lists were all assigned to one
  throwaway name, so the later ``_LazyModule(..., _import_structure, ...)``
  call raised NameError (``_import_structure`` was never defined) and the
  optional entries clobbered the dict instead of being registered in it;
* the ``TYPE_CHECKING`` imports referenced non-existent ``*_pixastruct``
  modules/classes, while the map keys correctly say ``*_pix2struct``.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Map submodule name -> public symbols, extended below when optional
# dependencies (vision, torch) are available.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers; names must match the map above.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # NOTE(review): upstream transformers assigns the lazy module into
    # ``sys.modules[__name__]``; this file bound it to a plain name. The
    # original binding is kept -- confirm against upstream before changing.
    UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
548
0
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar __A = TypeVar("T") class _A ( Generic[T] ): """simple docstring""" lowerCamelCase : deque[T] # Cache store of keys lowerCamelCase : set[T] # References of the keys in cache lowerCamelCase : int = 10 # Maximum capacity of cache def __init__( self : Any , __SCREAMING_SNAKE_CASE : int ) -> None: __UpperCAmelCase =deque() __UpperCAmelCase =set() if not n: __UpperCAmelCase =sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: __UpperCAmelCase =n def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: __UpperCAmelCase =self.dq_store.pop() self.key_reference.remove(__SCREAMING_SNAKE_CASE ) else: self.dq_store.remove(__SCREAMING_SNAKE_CASE ) self.dq_store.appendleft(__SCREAMING_SNAKE_CASE ) self.key_reference.add(__SCREAMING_SNAKE_CASE ) def _a ( self : Optional[int] ) -> None: for k in self.dq_store: print(__SCREAMING_SNAKE_CASE ) def __repr__( self : List[str] ) -> str: return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() __A = LRUCache(4) lru_cache.refer("A") lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer("A") lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
68
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( a_, unittest.TestCase ): __lowerCAmelCase = GPTaTokenizer __lowerCAmelCase = GPTaTokenizerFast __lowerCAmelCase = True __lowerCAmelCase = {"""add_prefix_space""": True} __lowerCAmelCase = False def __magic_name__ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] lowercase : Any = dict(zip(_a , range(len(_a ) ) ) ) lowercase : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase : int = {"unk_token": "<unk>"} lowercase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_a ) ) def __magic_name__ ( self , **_a ): kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **_a ) def __magic_name__ ( self , **_a ): kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def __magic_name__ ( self , _a ): lowercase : Tuple = "lower newer" lowercase : Dict = "lower newer" return input_text, output_text def __magic_name__ ( self ): lowercase : str = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase : Union[str, Any] = 
"lower newer" lowercase : Any = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] lowercase : int = tokenizer.tokenize(_a , add_prefix_space=_a ) self.assertListEqual(_a , _a ) lowercase : int = tokens + [tokenizer.unk_token] lowercase : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def __magic_name__ ( self ): if not self.test_rust_tokenizer: return lowercase : Union[str, Any] = self.get_tokenizer() lowercase : int = self.get_rust_tokenizer(add_prefix_space=_a ) lowercase : Union[str, Any] = "lower newer" # Testing tokenization lowercase : Union[str, Any] = tokenizer.tokenize(_a , add_prefix_space=_a ) lowercase : List[Any] = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) # Testing conversion to ids without special tokens lowercase : Dict = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) lowercase : Dict = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) # Testing conversion to ids with special tokens lowercase : Any = self.get_rust_tokenizer(add_prefix_space=_a ) lowercase : Any = tokenizer.encode(_a , add_prefix_space=_a ) lowercase : Any = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # Testing the unknown token lowercase : Union[str, Any] = tokens + [rust_tokenizer.unk_token] lowercase : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a ) def __magic_name__ ( self , *_a , **_a ): # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __magic_name__ ( self , _a=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase : int = self.rust_tokenizer_class.from_pretrained(_a , **_a ) # Simple input lowercase : int = "This is a simple input" 
lowercase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"] lowercase : Union[str, Any] = ("This is a simple input", "This is a pair") lowercase : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" ) # Simple input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" ) # Simple input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , ) # Pair input self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" ) # Pair input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" ) # Pair input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , ) def __magic_name__ ( self ): lowercase : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input lowercase : int = "This is a simple input" lowercase : Tuple = ["This is a simple input looooooooong", "This is a simple input"] lowercase : Any = ("This is a simple input", "This is a pair") lowercase : str = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] lowercase : List[Any] = tokenizer.pad_token_id lowercase : List[Any] = tokenizer(_a , padding="max_length" , max_length=30 , return_tensors="np" ) lowercase : List[str] = tokenizer(_a , padding=_a , truncate=_a , return_tensors="np" ) lowercase : List[Any] = tokenizer(*_a , padding="max_length" , max_length=60 , return_tensors="np" ) lowercase : Any = tokenizer(_a , padding=_a , truncate=_a , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) 
self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __magic_name__ ( self ): lowercase : List[Any] = "$$$" lowercase : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a ) lowercase : List[Any] = "This is a simple input" lowercase : str = ["This is a simple input 1", "This is a simple input 2"] lowercase : Union[str, Any] = tokenizer.bos_token_id lowercase : List[str] = tokenizer(_a ) lowercase : Any = tokenizer(_a ) self.assertEqual(out_s.input_ids[0] , _a ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowercase : List[Any] = tokenizer.decode(out_s.input_ids ) lowercase : Tuple = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _a ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __magic_name__ ( self ): pass def __magic_name__ ( self ): # TODO: change to self.get_tokenizers() when the fast version is implemented lowercase : Dict = [self.get_tokenizer(do_lower_case=_a , add_bos_token=_a )] for tokenizer in 
tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): lowercase : Optional[Any] = "Encode this." lowercase : Union[str, Any] = "This one too please." lowercase : Dict = tokenizer.encode(_a , add_special_tokens=_a ) encoded_sequence += tokenizer.encode(_a , add_special_tokens=_a ) lowercase : Optional[Any] = tokenizer.encode_plus( _a , _a , add_special_tokens=_a , return_special_tokens_mask=_a , ) lowercase : Tuple = encoded_sequence_dict["input_ids"] lowercase : int = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(_a ) , len(_a ) ) lowercase : List[str] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_a ) ] lowercase : List[str] = [x for x in filtered_sequence if x is not None] self.assertEqual(_a , _a ) @require_tokenizers class a__ ( unittest.TestCase ): def __magic_name__ ( self ): # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 lowercase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=_a ) lowercase : Any = "A photo of a cat" lowercase : List[str] = tokenizer.encode( _a , ) self.assertEqual(_a , [2, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("test_opt" ) lowercase : Union[str, Any] = AutoTokenizer.from_pretrained("./test_opt" ) lowercase : Tuple = tokenizer.encode( _a , ) self.assertEqual(_a , [2, 250, 1_345, 9, 10, 4_758] ) def __magic_name__ ( self ): lowercase : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=_a ) lowercase : Union[str, Any] = "A photo of a cat" lowercase : Union[str, Any] = tokenizer.encode( _a , ) # Same as above self.assertEqual(_a , [2, 250, 1_345, 9, 10, 4_758] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def __magic_name__ ( self ): lowercase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , 
from_slow=_a ) lowercase : List[Any] = "bos" lowercase : Optional[int] = tokenizer.get_vocab()["bos"] lowercase : List[str] = "A photo of a cat" lowercase : Dict = tokenizer.encode( _a , ) # We changed the bos token self.assertEqual(_a , [31_957, 250, 1_345, 9, 10, 4_758] ) tokenizer.save_pretrained("./tok" ) lowercase : Tuple = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) lowercase : str = tokenizer.encode( _a , ) self.assertEqual(_a , [31_957, 250, 1_345, 9, 10, 4_758] )
361
0
def _lowerCAmelCase ( lowerCamelCase_ : List[Any] ): if not nums: # Makes sure that the list is not empty raise ValueError('''List is empty''' ) __lowercase = sum(__snake_case ) / len(__snake_case ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
707
"""Lazy-import init for CLIPSeg.

Fixes the original's broken import map: both the configuration dict and the
torch-only model list were assigned to the same throwaway name (the second
assignment clobbering the first), and ``_LazyModule`` was then handed the
never-defined name ``_import_structure`` -- a guaranteed NameError on import.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map submodule name -> public symbols; modeling entries are added only
# when torch is installed.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers; names must match the map above.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # NOTE(review): upstream transformers assigns the lazy module into
    # ``sys.modules[__name__]``; this file bound it to a plain name. The
    # original binding is kept -- confirm against upstream before changing.
    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
56
0
'''Convert an mmsegmentation UperNet+ConvNext checkpoint to Hugging Face format.

NOTE(review): identifiers were machine-mangled. All four functions share the
name ``UpperCamelCase_`` (each shadows the previous), every local is
``_UpperCAmelCase``, and two signatures repeat that parameter name
(SyntaxError as written); the bodies read names (model_name, idalabel, config,
rename_keys, dct, val, state_dict, model, outputs, args, ...) that are never
bound as written. Code tokens are preserved unchanged; comments only.
'''
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def UpperCamelCase_ ( _UpperCAmelCase : int ) -> Any:
    """Build an UperNetConfig (ConvNext backbone + ADE20k labels) for a model size."""
    _UpperCAmelCase : List[Any] = 384
    if "tiny" in model_name:
        _UpperCAmelCase : int = [3, 3, 9, 3]
        _UpperCAmelCase : List[Any] = [96, 192, 384, 768]
    if "small" in model_name:
        _UpperCAmelCase : Optional[Any] = [3, 3, 27, 3]
        _UpperCAmelCase : Any = [96, 192, 384, 768]
    if "base" in model_name:
        _UpperCAmelCase : Tuple = [3, 3, 27, 3]
        _UpperCAmelCase : Union[str, Any] = [128, 256, 512, 1_024]
        _UpperCAmelCase : List[Any] = 512
    if "large" in model_name:
        _UpperCAmelCase : Tuple = [3, 3, 27, 3]
        _UpperCAmelCase : Tuple = [192, 384, 768, 1_536]
        _UpperCAmelCase : Union[str, Any] = 768
    if "xlarge" in model_name:
        _UpperCAmelCase : List[str] = [3, 3, 27, 3]
        _UpperCAmelCase : Optional[Any] = [256, 512, 1_024, 2_048]
        _UpperCAmelCase : Optional[int] = 1_024
    # set label information (150 ADE20k classes from the HF label-files dataset)
    _UpperCAmelCase : Optional[Any] = 150
    _UpperCAmelCase : List[Any] = "huggingface/label-files"
    _UpperCAmelCase : Any = "ade20k-id2label.json"
    _UpperCAmelCase : int = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
    _UpperCAmelCase : List[str] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
    _UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
    _UpperCAmelCase : Tuple = ConvNextConfig(
        depths=_UpperCAmelCase , hidden_sizes=_UpperCAmelCase , out_features=["stage1", "stage2", "stage3", "stage4"] )
    _UpperCAmelCase : Optional[Any] = UperNetConfig(
        backbone_config=_UpperCAmelCase , auxiliary_in_channels=_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase , )
    return config


def UpperCamelCase_ ( _UpperCAmelCase : Any ) -> Any:
    """Return (old_key, new_key) pairs mapping mmseg names to HF names."""
    _UpperCAmelCase : List[str] = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
        if i > 0:
            # stage 0 has no downsampling layer (handled by the stem above)
            rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
            rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
            rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
            rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
        rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
        rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ] )
    # fmt: on
    return rename_keys


def UpperCamelCase_ ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[str]:
    # NOTE(review): duplicate parameter names -> SyntaxError as written;
    # presumably (dct, old, new): pop the old key and re-insert under the new.
    """Rename one state-dict key in place."""
    _UpperCAmelCase : List[str] = dct.pop(_UpperCAmelCase )
    _UpperCAmelCase : Optional[int] = val


def UpperCamelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
    # NOTE(review): duplicate parameter names -> SyntaxError as written;
    # presumably (model_name, pytorch_dump_folder_path, push_to_hub).
    """Download, convert, verify (against known logits) and save a checkpoint."""
    _UpperCAmelCase : List[Any] = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    _UpperCAmelCase : str = model_name_to_url[model_name]
    _UpperCAmelCase : str = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="cpu" )["state_dict"]
    _UpperCAmelCase : Any = get_upernet_config(_UpperCAmelCase )
    _UpperCAmelCase : Tuple = UperNetForSemanticSegmentation(_UpperCAmelCase )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        _UpperCAmelCase : int = state_dict.pop(_UpperCAmelCase )
        if "bn" in key:
            _UpperCAmelCase : List[str] = key.replace("bn" , "batch_norm" )
        _UpperCAmelCase : Tuple = val
    # rename keys
    _UpperCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    model.load_state_dict(_UpperCAmelCase )
    # verify on image
    _UpperCAmelCase : Optional[int] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    _UpperCAmelCase : Any = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
    _UpperCAmelCase : Optional[Any] = SegformerImageProcessor()
    _UpperCAmelCase : Optional[Any] = processor(_UpperCAmelCase , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        _UpperCAmelCase : Any = model(_UpperCAmelCase )
    # reference logits recorded from the original mmseg checkpoints
    if model_name == "upernet-convnext-tiny":
        _UpperCAmelCase : List[Any] = torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
    elif model_name == "upernet-convnext-small":
        _UpperCAmelCase : Dict = torch.tensor(
            [[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
    elif model_name == "upernet-convnext-base":
        _UpperCAmelCase : Any = torch.tensor(
            [[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
    elif model_name == "upernet-convnext-large":
        _UpperCAmelCase : int = torch.tensor(
            [[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
    elif model_name == "upernet-convnext-xlarge":
        _UpperCAmelCase : Optional[int] = torch.tensor(
            [[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(_UpperCAmelCase )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(_UpperCAmelCase )
    if push_to_hub:
        print(F"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(F"""openmmlab/{model_name}""" )
        processor.push_to_hub(F"""openmmlab/{model_name}""" )


if __name__ == "__main__":
    __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-convnext-tiny""",
        type=str,
        choices=[F'upernet-convnext-{size}' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
        help="""Name of the ConvNext UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" )
    __SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
244
"""Generate the Hamming number series (numbers of the form 2^i * 3^j * 5^k)."""


def UpperCamelCase_ ( _UpperCAmelCase : int ) -> list:
    """Return the first ``n`` Hamming numbers in ascending order, starting at 1.

    Args:
        _UpperCAmelCase: how many Hamming numbers to generate (must be >= 1).

    Returns:
        A sorted list of the first ``n`` Hamming numbers.

    Raises:
        ValueError: if the requested count is smaller than 1.
    """
    n_element = int(_UpperCAmelCase)
    if n_element < 1:
        # BUG FIX: the original bound the ValueError to a throwaway local and then
        # executed `raise my_error` on an undefined name (a NameError at runtime).
        raise ValueError("a should be a positive number")

    hamming_list = [1]
    # i, j, k index the next list element whose multiple by 2, 3 or 5 respectively
    # has not yet been appended.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past values whose product is already <= the current
        # maximum (so duplicates such as 2*3 == 3*2 are never appended twice).
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    # BUG FIX: the original stored the input under one obfuscated name but then
    # called an undefined `hamming(int(n))`; call the generator defined above.
    hamming_numbers = UpperCamelCase_(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
244
1
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


# BUG FIX (whole module): the obfuscated source defined every function under the
# single name `UpperCamelCase_` (each definition shadowing the previous one) and
# bound results to throwaway locals, so internal calls such as
# `is_compiled_module(...)` raised NameError and several functions had no effect.
# The canonical names and assignments are restored below.


def is_compiled_module(module):
    """Check whether `module` was compiled with `torch.compile()`."""
    # `torch._dynamo` only exists on torch >= 2.0
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers.

    Args:
        model: the model to unwrap (possibly wrapped by DDP/DataParallel,
            DeepSpeed, and/or `torch.compile`).
        keep_fp32_wrapper: whether to keep the mixed-precision forward wrapper
            installed around `model.forward`.

    Returns:
        The innermost (unwrapped) model, re-wrapped in its compile wrapper if
        it was compiled.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        # Unwrap the `torch.compile` wrapper first and re-attach it at the end.
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Walk back through the decorator chain until the original forward
            # is found (or the chain is exhausted).
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, writing only once per node (XLA save on TPU, main process otherwise)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Context manager that temporarily sets `os.environ[KEY.upper()] = str(value)` for each kwarg.

    BUG FIX: the original assigned `str(value)` to a throwaway local instead of
    `os.environ`, so nothing was ever set (yet the cleanup still deleted keys).
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for `obj`: its qualname, name, or `str()` fallback."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        # Fall back to the object's class when the instance carries no name.
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` (in place) and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Return True if `port` is already bound on localhost.

    Defaults to 29500, torch.distributed's default rendezvous port.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
382
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


# BUG FIX: this list was bound to the throwaway name `a` while the code below
# references it as `DYNAMO_BACKENDS`.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt repeatedly until the user gives a convertible answer.

    Empty input returns `default` (when one is given); a conversion failure
    prints `error_message` and asks again.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    """Show a bullet menu of `options` and return the (optionally converted) choice.

    NOTE: `options=[]` (mutable default) is kept for interface compatibility;
    the list is only read, never mutated.
    """
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    """Map a menu index to a `ComputeEnvironment`."""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    """Map a menu index to a `DistributedType`."""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    """Map a menu index to a `DynamoBackend` value string."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    """Map a menu index to a `PrecisionType`."""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    """Map a menu index to a `SageMakerDistributedType`."""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    """Convert a case-insensitive "yes"/"no" answer to a bool."""
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the generic usage prefix from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
382
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the parameters used to build the GLPN image-processor config under test."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        # BUG FIX: the obfuscated source bound each argument to a throwaway
        # local, so the tester instance carried no attributes at all.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate `GLPNImageProcessor`."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
45
import numpy as np
import torch
import tqdm

# BUG FIX: the module path and class name were digit-mangled
# ("unet_ad" / "UNetaDModel"); this pipeline consumes the 1D UNet.
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    """Pipeline for value-guided sampling of RL trajectories from a diffusion model.

    BUG FIX (whole class): the obfuscated source bound every intermediate
    result — including all `self.*` attributes in `__init__`, the conditioning
    writes in `reset_x0`, and the guidance-gradient updates — to throwaway
    locals, turning the pipeline into a sequence of dead stores. The canonical
    assignments are restored below.
    """

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key mean/std of the offline dataset, used for (de-)normalization.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Move `x_in` (tensor, dict of tensors, or array-like) onto the UNet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        # Overwrite the state slice of each conditioned timestep with its fixed value.
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                # do not guide the last two denoising steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
45
1
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict

# BUG FIX: the import was digit-mangled ("tax"); the checkpoint loader lives in t5x.
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
# BUG FIX: bound to the throwaway name `a_` while referenced below as
# MOE_LAYER_NAME_MAPPING.
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    """Rename T5X checkpoint keys in place to the HF SwitchTransformers layout; return the dict."""
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_regex = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # The relative-attention bias is stored transposed in the T5X checkpoint.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor
    # into one entry per expert.
    # BUG FIX: the obfuscated loop dropped its dict writes and printed a literal
    # "nested fstring" placeholder.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict


# BUG FIX: bound to `a_` while referenced below as GIN_TO_CONFIG_MAPPING.
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    """Convert a google-style gin config file to a `SwitchTransformersConfig`."""
    # Convert a google style config to the hugging face format.
    # NOTE: the third-party `regex` module shadows the top-level `re` here,
    # as in the upstream script.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X SwitchTransformers checkpoint, rename its weights, and save a PyTorch model."""
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    # BUG FIX: the argparse dest is `switch_t5x_checkpoint_path`; the mangled
    # source read `args.switch_tax_checkpoint_path` (AttributeError at runtime).
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
286
import argparse
import hashlib
import io
import os
import urllib.request  # bare `import urllib` does not guarantee the `request` submodule is loaded
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration

# Official OpenAI checkpoint URLs. The second-to-last path component of each
# URL is the file's expected SHA256 digest, which `_download` verifies.
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop the top-level bookkeeping entries from an OpenAI state dict, in place.

    The original code iterated `ignore_keys` but popped the wrong name; fixed to
    pop each ignored key with a `None` default so absent keys do not raise.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


# Substring renames from OpenAI Whisper parameter names to HF WhisperModel names.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename every key of *s_dict* per WHISPER_MAPPING, in place; returns it."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free output projection sharing the embedding's weight tensor.

    Note: assigning `.data` replaces the whole weight tensor, so the final
    weight shape is (vocab_size, emb_size) as `F.linear` expects.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    """Download *url* into *root* and return its bytes, verifying the SHA256
    digest embedded in the URL. Reuses a previously downloaded file when its
    checksum still matches.

    `root` now defaults to "." so the single-argument call site works.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        # `hashlib.shaaaa` did not exist; the URL carries a SHA256 digest.
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )
    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint (a model name from _MODELS or a
    local .pt path) into a Hugging Face model directory.

    Raises ValueError if weights other than the positional embeddings fail to
    load into the HF model.
    """
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes; deserialize them like a local file.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Encoder and decoder share the same FFN width in OpenAI checkpoints.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # was dimensions["n_text_head"] upstream; "n_text_state" is the model
        # width, not the head count — fixed.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        # Tie the LM head to the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
286
1
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple

from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): identifiers in this file look machine-mangled — `snake_case`
# locals are assigned but read back under other names (`toks`, `output_txt`,
# `tokenizer`, ...), and several signatures repeat the parameter name
# `SCREAMING_SNAKE_CASE__`, which is a SyntaxError as written. Code is kept
# byte-identical here; only comments/docstrings were changed. TODO: restore
# the original identifiers before running this suite.
@require_phonemizer
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
    """Test suite for the Wav2Vec2 phoneme CTC tokenizer (base class name is
    mangled; presumably TokenizerTesterMixin — verify)."""

    # presumably `tokenizer_class` and `test_rust_tokenizer`; the two
    # assignments share one mangled name, so the second shadows the first.
    __UpperCamelCase = WavaVecaPhonemeCTCTokenizer
    __UpperCamelCase = False

    def _UpperCamelCase ( self ):
        """Write a phoneme vocabulary + special-tokens map into the temp dir."""
        super().setUp()
        # Full phoneme inventory used by the espeak CV checkpoint.
        snake_case: Any = (
            '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
            'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
            'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
            'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
            'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
            'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
            'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
            'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
            'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
            'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
            'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
            'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
            'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(' ' )
        # token -> id mapping, written out as the JSON vocab file below
        snake_case: Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
        snake_case: List[str] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
        snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' )

    def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ):
        """Build a (text, ids) pair whose round-trip through the tokenizer is
        consistent. NOTE(review): duplicate parameter names (SyntaxError as
        written); presumably (tokenizer, with_prefix_space, max_length,
        min_length) — verify against the mixin."""
        # keep only ids whose decode re-encodes to themselves
        snake_case: Optional[Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )) for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
        snake_case: List[Any] = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
        if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
            snake_case: Dict = toks[:max_length]
        if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
            while len(SCREAMING_SNAKE_CASE__ ) < min_length:
                snake_case: List[Any] = toks + toks
        # toks_str = [t[1] for t in toks]
        snake_case: Any = [t[0] for t in toks]
        # Ensure consistency
        snake_case: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
        if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
            # force at least one space so encode/decode stays reversible
            snake_case: int = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
            )
        if with_prefix_space:
            snake_case: int = ' ' + output_txt
        snake_case: Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
        return output_txt, output_ids

    def _UpperCamelCase ( self , **SCREAMING_SNAKE_CASE__ ):
        """Instantiate a tokenizer from the temp vocab plus the special tokens."""
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Newly added tokens get ids appended after the existing vocab."""
        snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        # check adding a single token
        tokenizer.add_tokens('xxx' )
        snake_case: Union[str, Any] = tokenizer('m xxx ɪ' , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , [13, 3_92, 17] )  # xxx should be last token
        tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
        snake_case: Tuple = tokenizer('m aaa ɪ ccc' , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , [13, 3_93, 17, 3_95] )  # aaa and ccc should be after xxx and 2 after aaa
        snake_case: Any = tokenizer('maɪ c' , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , [3, 2_00] )  # mai should be <unk> (=3)

    def _UpperCamelCase ( self ):
        """phonemize() produces the expected en-us phoneme string."""
        snake_case: Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        snake_case: Tuple = 'Hello how are you'
        snake_case: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , 'h ə l oʊ h aʊ ɑːɹ j uː' )

    def _UpperCamelCase ( self ):
        """Encoding raw text equals encoding its pre-phonemized form."""
        snake_case: Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        snake_case: Tuple = 'Hello how are you'
        snake_case: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE__ , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids )

    def _UpperCamelCase ( self ):
        """decode(encode(text)) round-trips back to the phoneme string."""
        snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        snake_case: List[Any] = 'Hello how are you'
        snake_case: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        snake_case: List[str] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """batch_decode matches per-sample decode and drops CTC pad/duplicates."""
        snake_case: int = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        snake_case: List[Any] = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        snake_case: List[Any] = tokenizer.decode(sample_ids[0] )
        snake_case: Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , batch_tokens[0] )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )

    def _UpperCamelCase ( self ):
        """phonemize() appends the word delimiter after each word."""
        snake_case: Optional[Any] = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        snake_case: Dict = 'Hello how are you'
        snake_case: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )

    def _UpperCamelCase ( self ):
        """Word-delimited encode matches pre-phonemized encode."""
        snake_case: Tuple = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        snake_case: int = 'Hello how are you'
        snake_case: Optional[int] = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE__ , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids )

    def _UpperCamelCase ( self ):
        """Decode filters the word delimiter by default and keeps it on request."""
        snake_case: Optional[int] = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        # fmt: off
        snake_case: str = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        snake_case: Union[str, Any] = tokenizer.decode(sample_ids[0] )
        snake_case: Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , batch_tokens[0] )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
        # decode with no word_del_token filter
        snake_case: List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        snake_case: Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , batch_tokens[0] )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )

    def _UpperCamelCase ( self ):
        """Round-trip with delimiter filtering reproduces the phoneme string."""
        snake_case: Optional[int] = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        snake_case: Dict = 'Hello how are you'
        snake_case: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        snake_case: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Stripping delimiters from phonemize output equals unfiltered decode."""
        snake_case: List[str] = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        snake_case: Dict = 'Hello how are you'
        snake_case: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' )
        snake_case: Tuple = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Different phonemizer languages yield different ids and decodings."""
        snake_case: List[Any] = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        snake_case: int = 'Hello how are you'
        snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ , phonemizer_lang='en-us' ).input_ids
        snake_case: List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , phonemizer_lang='fr-fr' ).input_ids
        self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        snake_case: Dict = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
        snake_case: Optional[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , 'ɛ l o h aʊ a ʁ j u' )

    def _UpperCamelCase ( self ):
        """Encoding is case-insensitive (phonemizer lower-cases input)."""
        snake_case: int = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        snake_case: Optional[Any] = 'Hello how Are you'
        snake_case: Tuple = 'hello how are you'
        snake_case: Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
        snake_case: Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ):
        """Added tokens and special tokens survive CTC-style batch decoding."""
        snake_case: List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
        tokenizer.add_tokens(['!', '?'] )
        tokenizer.add_special_tokens({'cls_token': '$$$'} )
        # fmt: off
        snake_case: List[Any] = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
        ]
        # fmt: on
        snake_case: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )

    @staticmethod
    def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        """Project one field out of a list of offset dicts. NOTE(review):
        duplicate parameter names (SyntaxError as written); presumably
        (offsets, key) — verify."""
        snake_case: Union[str, Any] = [d[key] for d in offsets]
        return retrieved_list

    def _UpperCamelCase ( self ):
        """decode(output_char_offsets=True) returns text plus char offsets."""
        snake_case: Any = self.get_tokenizer(word_delimiter_token='|' )
        tokenizer.add_tokens('|' )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        snake_case: Tuple = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        snake_case: int = tokenizer.decode(SCREAMING_SNAKE_CASE__ , output_char_offsets=SCREAMING_SNAKE_CASE__ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue('text' in outputs )
        self.assertTrue('char_offsets' in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )

    def _UpperCamelCase ( self ):
        """batch_decode with offsets agrees with per-sample decode."""
        snake_case: str = self.get_tokenizer(word_delimiter_token='|' )

        def check_list_tuples_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            # NOTE(review): duplicate parameter names (SyntaxError as written);
            # presumably (outputs_batch, outputs_list) — verify.
            self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
            self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE__ ) )
            # transform list to ModelOutput
            snake_case: Optional[int] = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )

            def recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                    [recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for la, la in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )

        # fmt: off
        snake_case: Tuple = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        snake_case: Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , output_char_offsets=SCREAMING_SNAKE_CASE__ )
        snake_case: Dict = [tokenizer.decode(SCREAMING_SNAKE_CASE__ , output_char_offsets=SCREAMING_SNAKE_CASE__ ) for ids in sample_ids]
        check_list_tuples_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        pass

    @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        pass

    @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        pass

    @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        pass

    def _UpperCamelCase ( self ):
        """add_tokens / add_special_tokens grow the vocab and encode correctly."""
        snake_case: Tuple = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                snake_case: int = tokenizer.vocab_size
                snake_case: Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
                self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                snake_case: str = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                snake_case: Optional[int] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
                snake_case: Any = tokenizer.vocab_size
                snake_case: Optional[int] = len(SCREAMING_SNAKE_CASE__ )
                self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size + len(SCREAMING_SNAKE_CASE__ ) )
                snake_case: List[Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
                self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                snake_case: List[str] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                snake_case: Optional[Any] = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE__ )
                snake_case: Union[str, Any] = tokenizer.vocab_size
                snake_case: Tuple = len(SCREAMING_SNAKE_CASE__ )
                self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size_a + len(SCREAMING_SNAKE_CASE__ ) )
                snake_case: Tuple = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=SCREAMING_SNAKE_CASE__ )
                self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )

    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        pass

    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
    def _UpperCamelCase ( self ):
        '''simple docstring'''
        pass

    def _UpperCamelCase ( self ):
        """convert_tokens_to_string returns a dict with a 'text' entry."""
        snake_case: Union[str, Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                snake_case: Dict = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
                snake_case: Any = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
                self.assertIsInstance(output['text'] , SCREAMING_SNAKE_CASE__ )
329
'''simple docstring'''
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


# NOTE(review): identifiers in this file look machine-mangled — `snake_case`
# locals whose values are read back under other names (`text_encoding`,
# `encoding_image_processor`), and signatures that repeat the parameter name
# `SCREAMING_SNAKE_CASE__`, which is a SyntaxError as written. Code is kept
# byte-identical; only comments/docstrings were changed.
class SCREAMING_SNAKE_CASE ( snake_case ):
    """Processor combining a BLIP image processor and a BERT tokenizer into a
    single callable (base class name is mangled; presumably ProcessorMixin —
    verify against the imports above)."""

    # presumably ProcessorMixin's `attributes`, `image_processor_class` and
    # `tokenizer_class`; the three assignments share one mangled name, so
    # later ones shadow earlier ones — TODO restore the original names.
    __UpperCamelCase = ["image_processor", "tokenizer"]
    __UpperCamelCase = "BlipImageProcessor"
    __UpperCamelCase = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        """Store the two sub-processors.

        NOTE(review): duplicate parameter names (SyntaxError as written);
        presumably (image_processor, tokenizer). The first assignment looks
        like it was `tokenizer.return_token_type_ids = False` and the last
        `self.current_processor = self.image_processor` before mangling —
        verify against upstream BlipProcessor.
        """
        snake_case: Union[str, Any] = False
        super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        snake_case: Tuple = self.image_processor

    def __call__( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
        """Tokenize `text` and/or preprocess `images` into one encoding.

        Text-only input returns the tokenizer's output directly; image input
        returns the image processor's output, with token fields merged in when
        text is also given. Raises ValueError when both are None.
        NOTE(review): all parameter names are mangled duplicates (SyntaxError
        as written); the defaults match the usual tokenizer __call__ contract.
        """
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            snake_case: str = self.tokenizer
            snake_case: Dict = self.tokenizer(
                text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
            return text_encoding
        # add pixel_values
        snake_case: Tuple = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
        if text is not None:
            snake_case: Optional[Any] = self.tokenizer(
                text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
        else:
            snake_case: List[str] = None
        if text_encoding is not None:
            # merge token fields into the image-processor encoding
            encoding_image_processor.update(SCREAMING_SNAKE_CASE__ )
        return encoding_image_processor

    def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )

    @property
    def _UpperCamelCase ( self ):
        """Union of tokenizer and image-processor input names, order-preserving."""
        snake_case: int = self.tokenizer.model_input_names
        snake_case: str = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
329
1
'''simple docstring''' def a ( __a ) -> float: '''simple docstring''' if edge <= 0 or not isinstance(__a , __a ): raise ValueError('''Length must be a positive.''' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def a ( __a ) -> float: '''simple docstring''' if edge <= 0 or not isinstance(__a , __a ): raise ValueError('''Length must be a positive.''' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
705
'''simple docstring''' import numpy as np def a ( __a , __a , __a , __a , __a ) -> Union[str, Any]: '''simple docstring''' UpperCamelCase__ :Tuple = int(np.ceil((x_end - xa) / h ) ) UpperCamelCase__ :Optional[int] = np.zeros((n + 1,) ) UpperCamelCase__ :List[str] = ya UpperCamelCase__ :Tuple = xa for k in range(__a ): UpperCamelCase__ :Dict = f(__a , y[k] ) UpperCamelCase__ :List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) UpperCamelCase__ :Dict = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) UpperCamelCase__ :List[Any] = f(x + h , y[k] + h * ka ) UpperCamelCase__ :List[str] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
280
0
import requests from bsa import BeautifulSoup def lowerCamelCase( a__ = "https://www.worldometers.info/coronavirus"): _SCREAMING_SNAKE_CASE =BeautifulSoup(requests.get(a__).text ,'''html.parser''') _SCREAMING_SNAKE_CASE =soup.findAll('''h1''') _SCREAMING_SNAKE_CASE =soup.findAll('''div''' ,{'''class''': '''maincounter-number'''}) keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''}) values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''}) return {key.text.strip(): value.text.strip() for key, value in zip(a__ ,a__)} if __name__ == "__main__": print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''') for key, value in world_covidaa_stats().items(): print(f"""{key}\n{value}\n""")
691
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def lowerCamelCase( a__): def wrapper(*a__ ,**a__): _SCREAMING_SNAKE_CASE =timeit.default_timer() _SCREAMING_SNAKE_CASE =func(*a__ ,**a__) _SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime return delta _SCREAMING_SNAKE_CASE =func.__name__ return wrapper def lowerCamelCase( a__ ,a__=100 ,a__=None): _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =seq_shapes or {} for i in range(a__): _SCREAMING_SNAKE_CASE ={} for col_id, (k, v) in enumerate(features.items()): if isinstance(a__ ,_ArrayXD): _SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype) elif isinstance(a__ ,datasets.Value): if v.dtype == "string": _SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.''' else: _SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item() elif isinstance(a__ ,datasets.Sequence): while isinstance(a__ ,datasets.Sequence): _SCREAMING_SNAKE_CASE =v.feature _SCREAMING_SNAKE_CASE =seq_shapes[k] _SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype) _SCREAMING_SNAKE_CASE =data dummy_data.append((i, example)) return dummy_data def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None): _SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__) with ArrowWriter(features=a__ ,path=a__) as writer: for key, record in dummy_data: _SCREAMING_SNAKE_CASE =features.encode_example(a__) writer.write(a__) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize() if not num_final_examples == num_examples: raise ValueError( f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.") _SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__)) return dataset
691
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json", # See all Nat models at https://huggingface.co/models?filter=nat } class a ( __magic_name__ ,__magic_name__ ): _snake_case = '''nat''' _snake_case = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Any, SCREAMING_SNAKE_CASE_ : Optional[Any]=4, SCREAMING_SNAKE_CASE_ : Optional[int]=3, SCREAMING_SNAKE_CASE_ : Any=64, SCREAMING_SNAKE_CASE_ : List[str]=[3, 4, 6, 5], SCREAMING_SNAKE_CASE_ : Any=[2, 4, 8, 16], SCREAMING_SNAKE_CASE_ : Dict=7, SCREAMING_SNAKE_CASE_ : List[Any]=3.0, SCREAMING_SNAKE_CASE_ : List[Any]=True, SCREAMING_SNAKE_CASE_ : Dict=0.0, SCREAMING_SNAKE_CASE_ : List[Any]=0.0, SCREAMING_SNAKE_CASE_ : List[str]=0.1, SCREAMING_SNAKE_CASE_ : int="gelu", SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02, SCREAMING_SNAKE_CASE_ : Any=1e-5, SCREAMING_SNAKE_CASE_ : Dict=0.0, SCREAMING_SNAKE_CASE_ : str=None, SCREAMING_SNAKE_CASE_ : Optional[int]=None, **SCREAMING_SNAKE_CASE_ : List[Any], ): super().__init__(**SCREAMING_SNAKE_CASE_ ) snake_case : Optional[Any] = patch_size snake_case : str = num_channels snake_case : Tuple = embed_dim snake_case : Dict = depths snake_case : Any = len(SCREAMING_SNAKE_CASE_ ) snake_case : List[str] = num_heads snake_case : Tuple = kernel_size snake_case : List[Any] = mlp_ratio snake_case : Optional[int] = qkv_bias snake_case : Dict = hidden_dropout_prob snake_case : Optional[Any] = attention_probs_dropout_prob snake_case : str = drop_path_rate snake_case : int = hidden_act snake_case : int = layer_norm_eps snake_case : List[Any] = initializer_range # we set the hidden_size attribute in order to make Nat work 
with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case : Any = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) ) snake_case : Union[str, Any] = layer_scale_init_value snake_case : Union[str, Any] = ['''stem'''] + [F"""stage{idx}""" for idx in range(1, len(SCREAMING_SNAKE_CASE_ ) + 1 )] snake_case, snake_case : Any = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE_, out_indices=SCREAMING_SNAKE_CASE_, stage_names=self.stage_names )
555
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a : def __init__( self : Optional[int], SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : List[Any]=2, SCREAMING_SNAKE_CASE_ : str=32, SCREAMING_SNAKE_CASE_ : Union[str, Any]=16, SCREAMING_SNAKE_CASE_ : str=3, SCREAMING_SNAKE_CASE_ : Union[str, Any]=True, SCREAMING_SNAKE_CASE_ : int=True, SCREAMING_SNAKE_CASE_ : Optional[Any]=32, SCREAMING_SNAKE_CASE_ : Any=4, SCREAMING_SNAKE_CASE_ : Dict=[0, 1, 2, 3], SCREAMING_SNAKE_CASE_ : Dict=4, SCREAMING_SNAKE_CASE_ : Union[str, Any]=37, SCREAMING_SNAKE_CASE_ : str="gelu", SCREAMING_SNAKE_CASE_ : Optional[int]=0.1, SCREAMING_SNAKE_CASE_ : List[Any]=0.1, SCREAMING_SNAKE_CASE_ : Dict=0.02, SCREAMING_SNAKE_CASE_ : int=3, SCREAMING_SNAKE_CASE_ : List[str]=[1, 3_84, 24, 24], SCREAMING_SNAKE_CASE_ : Optional[int]=True, SCREAMING_SNAKE_CASE_ : Optional[int]=None, ): snake_case : int = parent snake_case : Tuple = batch_size snake_case : List[str] = image_size snake_case : Union[str, Any] = patch_size snake_case : Dict = num_channels snake_case : int = is_training snake_case : Any = use_labels snake_case : Any = hidden_size snake_case : Union[str, Any] = num_hidden_layers snake_case : Tuple = 
backbone_out_indices snake_case : Dict = num_attention_heads snake_case : Optional[Any] = intermediate_size snake_case : Any = hidden_act snake_case : List[str] = hidden_dropout_prob snake_case : List[str] = attention_probs_dropout_prob snake_case : Tuple = initializer_range snake_case : Any = num_labels snake_case : Union[str, Any] = backbone_featmap_shape snake_case : Optional[int] = scope snake_case : Tuple = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) snake_case : Dict = (image_size // patch_size) ** 2 snake_case : Any = num_patches + 1 def __snake_case ( self : List[Any] ): snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case : Union[str, Any] = None if self.use_labels: snake_case : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels ) snake_case : Optional[int] = self.get_config() return config, pixel_values, labels def __snake_case ( self : List[str] ): snake_case : Optional[int] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [96, 1_92, 3_84, 7_68], '''num_groups''': 2, } return DPTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=SCREAMING_SNAKE_CASE_, backbone_featmap_shape=self.backbone_featmap_shape, ) def __snake_case ( self : 
List[Any], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : str, SCREAMING_SNAKE_CASE_ : Any ): snake_case : Dict = DPTModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() snake_case : List[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : Optional[Any] ): snake_case : int = self.num_labels snake_case : Any = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size) ) def __snake_case ( self : Optional[int], SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : List[Any], SCREAMING_SNAKE_CASE_ : List[Any] ): snake_case : List[str] = self.num_labels snake_case : List[Any] = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __snake_case ( self : Union[str, Any] ): snake_case : Optional[int] = self.prepare_config_and_inputs() snake_case, snake_case, snake_case : Tuple = config_and_inputs snake_case : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a ( __magic_name__ ,__magic_name__ ,unittest.TestCase ): _snake_case = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _snake_case = ( { '''depth-estimation''': DPTForDepthEstimation, '''feature-extraction''': DPTModel, '''image-segmentation''': DPTForSemanticSegmentation, } if is_torch_available() else {} ) _snake_case 
= False _snake_case = False _snake_case = False def __snake_case ( self : Any ): snake_case : Optional[Any] = DPTModelTester(self ) snake_case : Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 ) def __snake_case ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='''DPT does not use inputs_embeds''' ) def __snake_case ( self : int ): pass def __snake_case ( self : List[str] ): snake_case, snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : Any = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) snake_case : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_, nn.Linear ) ) def __snake_case ( self : Tuple ): snake_case, snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case : int = model_class(SCREAMING_SNAKE_CASE_ ) snake_case : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case : Optional[int] = [*signature.parameters.keys()] snake_case : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ ) def __snake_case ( self : List[str] ): snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def __snake_case ( self : Tuple ): snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ ) def __snake_case ( self : Dict ): snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) def __snake_case ( self : Any ): for model_class in 
self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue snake_case, snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Dict = True if model_class in get_values(SCREAMING_SNAKE_CASE_ ): continue snake_case : Tuple = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.train() snake_case : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ ) snake_case : Dict = model(**SCREAMING_SNAKE_CASE_ ).loss loss.backward() def __snake_case ( self : int ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue snake_case, snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Dict = False snake_case : Dict = True if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing: continue snake_case : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.gradient_checkpointing_enable() model.train() snake_case : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, return_labels=SCREAMING_SNAKE_CASE_ ) snake_case : List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss loss.backward() def __snake_case ( self : Tuple ): snake_case, snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() snake_case : Union[str, Any] = _config_zero_init(SCREAMING_SNAKE_CASE_ ) for model_class in self.all_model_classes: snake_case : List[str] = model_class(config=SCREAMING_SNAKE_CASE_ ) # Skip the check for the backbone snake_case : Optional[int] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": snake_case : List[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: 
continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __snake_case ( self : Union[str, Any] ): pass @slow def __snake_case ( self : List[Any] ): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: snake_case : int = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def __snake_case ( self : Dict ): # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type snake_case, snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() snake_case : int = '''add''' with self.assertRaises(SCREAMING_SNAKE_CASE_ ): snake_case : Any = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ ) def A ( ): snake_case : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision @slow class a ( unittest.TestCase ): def __snake_case ( self : Dict ): snake_case : Dict = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' ) snake_case : int = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ ) snake_case : str = prepare_img() snake_case : List[str] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): snake_case : List[Any] = model(**SCREAMING_SNAKE_CASE_ ) snake_case : Optional[Any] = outputs.predicted_depth # verify the predicted depth snake_case : List[str] = torch.Size((1, 3_84, 3_84) ) self.assertEqual(predicted_depth.shape, SCREAMING_SNAKE_CASE_ ) snake_case : Dict = torch.tensor( [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00, 
SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
555
1
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( __snake_case : np.ndarray ): lowercase_ , lowercase_ , lowercase_ : Optional[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( __snake_case : np.ndarray ): return (gray > 1_2_7) & (gray <= 2_5_5) def lowercase ( __snake_case : np.ndarray , __snake_case : np.ndarray ): lowercase_ : Dict = np.zeros_like(__snake_case ) lowercase_ : int = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image lowercase_ : Dict = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): lowercase_ : Union[str, Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowercase_ : Dict = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg''' __A : Any = np.array(Image.open(lena_path)) # kernel to be applied __A : Optional[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Tuple = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Tuple = Image.fromarray(output).convert('''RGB''') pil_img.save('''result_dilation.png''')
231
"""simple docstring""" from torch import nn class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[int] , A : List[str] , A : Any ) -> Tuple: super().__init__() lowercase_ : Tuple = class_size lowercase_ : str = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowercase_ : str = nn.Linear(A , A ) def A ( self : Dict , A : Optional[int] ) -> Tuple: # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowercase_ : List[Any] = self.mlp(A ) return logits
231
1
"""simple docstring""" import os from collections import deque import torch from torch.utils.data import Dataset class a ( _SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , snake_case_="" , snake_case_="train" ) -> Optional[Any]: assert os.path.isdir(snake_case_ ) _UpperCAmelCase = [] _UpperCAmelCase = os.listdir(snake_case_ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue _UpperCAmelCase = os.path.join(snake_case_ , snake_case_ ) if not os.path.isfile(snake_case_ ): continue self.documents.append(snake_case_ ) def __len__( self ) -> Optional[int]: return len(self.documents ) def __getitem__( self , snake_case_ ) -> Optional[int]: _UpperCAmelCase = self.documents[idx] _UpperCAmelCase = document_path.split("/" )[-1] with open(snake_case_ , encoding="utf-8" ) as source: _UpperCAmelCase = source.read() _UpperCAmelCase , _UpperCAmelCase = process_story(snake_case_ ) return document_name, story_lines, summary_lines def A__ ( A__ ) -> Any: '''simple docstring''' _UpperCAmelCase = list(filter(lambda A__ : len(A__ ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) ) # for some unknown reason some lines miss a period, add it _UpperCAmelCase = [_add_missing_period(A__ ) for line in nonempty_lines] # gather article lines _UpperCAmelCase = [] _UpperCAmelCase = deque(A__ ) while True: try: _UpperCAmelCase = lines.popleft() if element.startswith("@highlight" ): break story_lines.append(A__ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines _UpperCAmelCase = list(filter(lambda A__ : not t.startswith("@highlight" ) , A__ ) ) return story_lines, summary_lines def A__ ( A__ ) -> Tuple: '''simple docstring''' _UpperCAmelCase = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"] if line.startswith("@highlight" ): return line if line[-1] in END_TOKENS: return line return line + "." 
def A__ ( A__ , A__ , A__ ) -> Optional[Any]: '''simple docstring''' if len(A__ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(A__ )) ) return sequence def A__ ( A__ , A__ ) -> Any: '''simple docstring''' _UpperCAmelCase = torch.ones_like(A__ ) _UpperCAmelCase = sequence == pad_token_id _UpperCAmelCase = 0 return mask def A__ ( A__ , A__ , A__ ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [tokenizer.encode(A__ ) for line in story_lines] _UpperCAmelCase = [token for sentence in story_lines_token_ids for token in sentence] _UpperCAmelCase = [tokenizer.encode(A__ ) for line in summary_lines] _UpperCAmelCase = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def A__ ( A__ , A__ ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase = [] for sequence in batch: _UpperCAmelCase = -1 _UpperCAmelCase = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(A__ ) return torch.tensor(A__ )
579
"""simple docstring""" import requests SCREAMING_SNAKE_CASE_ = '''''' # <-- Put your OpenWeatherMap appid here! SCREAMING_SNAKE_CASE_ = '''https://api.openweathermap.org/data/2.5/''' def A__ ( A__ = "Chicago" , A__ = APPID ) -> dict: '''simple docstring''' return requests.get(URL_BASE + "weather" , params=locals() ).json() def A__ ( A__ = "Kolkata, India" , A__ = APPID ) -> dict: '''simple docstring''' return requests.get(URL_BASE + "forecast" , params=locals() ).json() def A__ ( A__ = 55.68 , A__ = 12.57 , A__ = APPID ) -> dict: '''simple docstring''' return requests.get(URL_BASE + "onecall" , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: SCREAMING_SNAKE_CASE_ = input('''Enter a location:''').strip() if location: pprint(current_weather(location)) else: break
579
1
"""simple docstring""" def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' _A = 0 # if input_string is "aba" than new_input_string become "a|b|a" _A = '' _A = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(_snake_case ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring _A , _A = 0, 0 # length[i] shows the length of palindromic substring with center i _A = [1 for i in range(len(_snake_case ) )] # for each character in new_string find corresponding palindromic string _A = 0 for j in range(len(_snake_case ) ): _A = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(_snake_case ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _A = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: _A = j - k + 1 # noqa: E741 _A = j + k - 1 # update max_length and start position if max_length < length[j]: _A = length[j] _A = j # create that string _A = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
7
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a_ ( unittest.TestCase ): '''simple docstring''' @property def _lowercase ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) lowerCAmelCase_ = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def _lowercase ( self ) -> int: '''simple docstring''' lowerCAmelCase_ = self.dummy_uncond_unet lowerCAmelCase_ = ScoreSdeVeScheduler() lowerCAmelCase_ = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ ) sde_ve.to(lowercase_ ) sde_ve.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=lowercase_ ).images lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=lowercase_ , return_dict=lowercase_ )[ 0 ] lowerCAmelCase_ = image[0, -3:, -3:, -1] lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self ) -> str: '''simple docstring''' lowerCAmelCase_ = 'google/ncsnpp-church-256' lowerCAmelCase_ = UNetaDModel.from_pretrained(lowercase_ ) lowerCAmelCase_ = ScoreSdeVeScheduler.from_pretrained(lowercase_ ) lowerCAmelCase_ = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_ ) sde_ve.to(lowercase_ 
) sde_ve.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = sde_ve(num_inference_steps=1_0 , output_type='numpy' , generator=lowercase_ ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) lowerCAmelCase_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
318
0
from ....utils import logging _lowerCamelCase = logging.get_logger(__name__) class __A ( lowerCamelCase__ ): """simple docstring""" def __init__( self , a__ , a__=None , a__=2048): """simple docstring""" _lowerCamelCase : str = config.__dict__ _lowerCamelCase : str = modal_hidden_size if num_labels: _lowerCamelCase : Any = num_labels
613
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowerCamelCase = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase = ['LayoutLMv3FeatureExtractor'] _lowerCamelCase = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys _lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
613
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Dict = logging.get_logger(__name__) _a : Any = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" A = '''roformer''' def __init__( self , UpperCAmelCase=5_0_0_0_0 , UpperCAmelCase=None , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_5_3_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-12 , UpperCAmelCase=0 , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ): super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size if embedding_size is None else embedding_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = 
max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = rotary_value __lowerCamelCase = use_cache class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" @property def lowerCamelCase_ ( self ): if self.task == "multiple-choice": __lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} __lowerCamelCase = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
479
"""LayoutLMv3 model configuration and ONNX export description."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    """Configuration holding the hyper-parameters of a LayoutLMv3 model.

    Text hyper-parameters are forwarded to `PretrainedConfig`; the layout
    (2D position) and visual-patch hyper-parameters are stored here.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Text-side hyper-parameters are handled by the base class.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout (bounding-box) embedding hyper-parameters.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        # Relative attention bias hyper-parameters (1D and 2D).
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        # Modality switches and visual-patch hyper-parameters.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported ONNX graph inputs."""
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting all operators used by the model."""
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy (text, bbox, image) inputs for tracing the export.

        A `batch_size`/`seq_length` of -1 means the axis is dynamic; a small
        fixed dimension is substituted to avoid ONNX constant-folding.
        """
        # Dummy text is supplied directly, so the processor must not run OCR.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
479
1
"""Tests for Wav2Vec2ProcessorWithLM (feature extractor + tokenizer + pyctcdecode beam-search decoder)."""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        # Write a tiny vocab and feature-extractor config to a temp dir so the
        # tokenizer/feature extractor can be loaded with `from_pretrained`.
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        # Deterministic pseudo-logits so decode results are reproducible.
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        # Collect one field (e.g. "word" or "start_offset") from a list of offset dicts.
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
720
"""Slow GPU integration tests for StableDiffusionKDiffusionPipeline."""
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        """SD v1.4 with the `sample_euler` k-diffusion scheduler matches a reference slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        """SD v2.1-base with the `sample_euler` scheduler matches a reference slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        # wider tolerance: the v2 checkpoint is noticeably less deterministic across drivers
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        """SD v2.1-base with `sample_dpmpp_2m` and Karras sigmas matches a reference slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
43
0
"""Generic directed/undirected graph backed by an adjacency-list dictionary."""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph.

    `adj_list` maps each vertex to the list of its adjacent vertices.
    When `directed` is False every edge is mirrored in both vertex lists.
    """

    def __init__(self, directed: bool = True) -> None:
        # dictionary of lists
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Insert the edge source -> destination (and the reverse edge when
        undirected), creating any vertex not yet present. Returns self so
        calls can be chained."""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create both vertices, each seeded with the other as its first
            # adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
390
"""Image processor with resize / center-crop / rescale / normalize steps
(ImageNet-standard mean/std, 256-shortest-edge resize, 224x224 crop)."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class StandardImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that (optionally) resizes the shortest edge,
    center-crops, rescales pixel values and normalizes with ImageNet-standard
    mean/std. Each step can be toggled per call in `preprocess`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Resize targets the shortest edge (non-square) by default.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: (image - mean) / std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a batch.

        Per-call arguments override the instance defaults set in `__init__`.
        Returns a `BatchFeature` with a `pixel_values` entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
390
1
import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    # FIX: all three classes in this chunk were named `UpperCamelCase__`, so the later
    # definitions shadowed the earlier ones, and `setUp` below referenced `NezhaModelTester`,
    # a name that never existed -- NameError at test time.  Likewise every method was named
    # `UpperCAmelCase__`; real names are restored from their internal call sites
    # (`self.prepare_config_and_inputs()`, `self.get_config()`, ...).
    """Builds small random Nezha configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # NOTE(review): 22 defaults vs 21 stored attrs in the original; this one is not stored -- confirm
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids, optional masks/labels, plus a fresh config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # The obfuscated source passed a placeholder here; presumably `is_decoder=False`
            # for the non-decoder path -- TODO confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs, plus cross-attention inputs for decoder mode."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the three mixin bases were all obfuscated to `__magic_name__`; they are
    # restored from this file's own imports (ModelTesterMixin, GenerationTesterMixin,
    # PipelineTesterMixin), which are otherwise unused -- confirm order against upstream.
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # The original had an anonymous `= True` class attribute here; presumably
    # `fx_compatible = True` as in the upstream test -- TODO confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pretraining heads when the mixin asks for labeled inputs."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # Regression check: the decoder path must also work when no attention mask is passed.
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
708
import io
import math
from typing import Dict, Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    get_image_size,
    infer_channel_dimension_format,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends


if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # FIX: the original assigned three different module globals all to the name `a`
    # (this flag, the logger, and the font repo), so each assignment clobbered the last
    # and the call sites below could not resolve.  Real names restored from usage.
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    """Raise if torch is installed but too old for the unfold-based patch extraction."""
    # FIX: the three module functions were all named `UpperCAmelCase_`, shadowing each
    # other; names restored from their call sites (`_check_torch_version()`,
    # `torch_extract_patches(...)`, `render_text(...)`).
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from a CHW image tensor,
    returning shape [1, rows, columns, patch_height * patch_width * channels]."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` (wrapped at 80 chars) onto a new PIL image with the given padding.

    Font resolution order: explicit bytes, explicit path, then Arial from the hub repo.
    """
    # FIX: every parameter in the original was the duplicate name `UpperCAmelCase__`
    # (a SyntaxError); names restored from keyword call sites (`font_bytes=`, `font_path=`).
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image, header, **kwargs):
    """Render `header` text above `image`, widening both to a common width."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    # FIX: the original read `class UpperCamelCase__ ( __magic_name__ )` -- the base name
    # is undefined.  `BaseImageProcessor` is imported above and is the conventional base;
    # the class name is recovered from this module's own ImportError message.
    """Converts images to flattened, position-tagged patch sequences for Pix2Struct."""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize so as many (patch_height, patch_width) patches fit as possible (<= max_patches),
        extract them, prepend 1-based row/column ids, and zero-pad to exactly max_patches rows."""
        # FIX: both methods of this class were named `UpperCAmelCase__`; names restored from
        # the `self.extract_flattened_patches` / `self.normalize` calls in `preprocess`.
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. rows * cols <= max_patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Standardize the image by its own mean/std (std floored to avoid divide-by-zero)."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Convert one or more images into padded flattened-patch tensors plus an attention mask.

        For VQA checkpoints (`is_vqa=True`) `header_text` is rendered above each image first.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy: 1 for real patch rows, 0 for padding rows
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
650
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class a ( __lowerCamelCase ): __lowerCAmelCase : Any = """levit""" def __init__( self :int ,__lowercase :Union[str, Any]=2_2_4 ,__lowercase :Tuple=3 ,__lowercase :Optional[Any]=3 ,__lowercase :Optional[int]=2 ,__lowercase :List[Any]=1 ,__lowercase :List[str]=1_6 ,__lowercase :Any=[1_2_8, 2_5_6, 3_8_4] ,__lowercase :Dict=[4, 8, 1_2] ,__lowercase :Optional[Any]=[4, 4, 4] ,__lowercase :Any=[1_6, 1_6, 1_6] ,__lowercase :Dict=0 ,__lowercase :List[str]=[2, 2, 2] ,__lowercase :Any=[2, 2, 2] ,__lowercase :Optional[Any]=0.02 ,**__lowercase :Dict ,): super().__init__(**__lowercase ) snake_case__ : Tuple = image_size snake_case__ : int = num_channels snake_case__ : Any = kernel_size snake_case__ : str = stride snake_case__ : Optional[int] = padding snake_case__ : Optional[Any] = hidden_sizes snake_case__ : List[Any] = num_attention_heads snake_case__ : List[str] = depths snake_case__ : Dict = key_dim snake_case__ : str = drop_path_rate snake_case__ : List[Any] = patch_size snake_case__ : Tuple = attention_ratio snake_case__ : Tuple = mlp_ratio snake_case__ : Any = initializer_range snake_case__ : Dict = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class a ( __lowerCamelCase ): __lowerCAmelCase : List[Any] = version.parse("""1.11""" ) @property def __lowerCamelCase ( self :Dict ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self :Dict ): return 1e-4
252
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ = logging.get_logger(__name__) A__ = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class a ( __lowerCamelCase , __lowerCamelCase ): __lowerCAmelCase : List[str] = """nat""" __lowerCAmelCase : Union[str, Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self :List[str] ,__lowercase :int=4 ,__lowercase :int=3 ,__lowercase :Optional[int]=6_4 ,__lowercase :Tuple=[3, 4, 6, 5] ,__lowercase :List[Any]=[2, 4, 8, 1_6] ,__lowercase :Optional[int]=7 ,__lowercase :Optional[int]=3.0 ,__lowercase :List[Any]=True ,__lowercase :List[str]=0.0 ,__lowercase :Optional[Any]=0.0 ,__lowercase :Tuple=0.1 ,__lowercase :Union[str, Any]="gelu" ,__lowercase :str=0.02 ,__lowercase :Optional[Any]=1e-5 ,__lowercase :Optional[Any]=0.0 ,__lowercase :List[str]=None ,__lowercase :List[Any]=None ,**__lowercase :Optional[Any] ,): super().__init__(**__lowercase ) snake_case__ : str = patch_size snake_case__ : str = num_channels snake_case__ : Dict = embed_dim snake_case__ : List[Any] = depths snake_case__ : Any = len(__lowercase ) snake_case__ : List[str] = num_heads snake_case__ : Dict = kernel_size snake_case__ : Optional[int] = mlp_ratio snake_case__ : Optional[int] = qkv_bias snake_case__ : Tuple = hidden_dropout_prob snake_case__ : Optional[Any] = attention_probs_dropout_prob snake_case__ : Union[str, Any] = drop_path_rate snake_case__ : int = hidden_act snake_case__ : Dict = layer_norm_eps snake_case__ : str = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case__ : List[Any] = 
int(embed_dim * 2 ** (len(__lowercase ) - 1) ) snake_case__ : Union[str, Any] = layer_scale_init_value snake_case__ : Tuple = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 ,len(__lowercase ) + 1 )] snake_case__ , snake_case__ : Optional[int] = get_aligned_output_features_output_indices( out_features=__lowercase ,out_indices=__lowercase ,stage_names=self.stage_names )
252
1
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) a : Dict = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ): '''simple docstring''' lowercase__ : int= None lowercase__ : Tuple= os.path.abspath(os.path.join("examples" , "by_feature" ) ) lowercase__ : List[Any]= os.path.abspath("examples" ) for item in os.listdir(_UpperCAmelCase ): if item not in EXCLUDE_EXAMPLES: lowercase__ : List[str]= os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=_UpperCAmelCase , feature_script=_UpperCAmelCase , tested_section="main()" if parser_only else "training_function()" , ): lowercase__ : Tuple= compare_against_test( os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowercase__ : List[str]= "\n".join(_UpperCAmelCase ) if special_strings is not None: for string in special_strings: lowercase__ : Any= diff.replace(_UpperCAmelCase , "" ) self.assertEqual(_UpperCAmelCase , "" ) def UpperCAmelCase_ ( self ): '''simple docstring''' self.one_complete_example("complete_nlp_example.py" , 
_UpperCAmelCase ) self.one_complete_example("complete_nlp_example.py" , _UpperCAmelCase ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= os.path.abspath(os.path.join("examples" , "cv_example.py" ) ) lowercase__ : List[Any]= [ " " * 16 + "{\n\n", " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 20 + "\"epoch\": epoch,\n\n", " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.one_complete_example("complete_cv_example.py" , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = False @classmethod def UpperCAmelCase_ ( cls ): '''simple docstring''' super().setUpClass() lowercase__ : Union[str, Any]= tempfile.mkdtemp() lowercase__ : List[str]= os.path.join(cls._tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) lowercase__ : int= ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def UpperCAmelCase_ ( cls ): '''simple docstring''' super().tearDownClass() shutil.rmtree(cls._tmpdir ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= F''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= F''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() lowercase__ : List[Any]= 
run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} '''.split() lowercase__ : Any= run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase ) self.assertNotIn("epoch 0:" , _UpperCAmelCase ) self.assertIn("epoch 1:" , _UpperCAmelCase ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= F''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} '''.split() lowercase__ : Optional[Any]= run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase ) if torch.cuda.is_available(): lowercase__ : List[Any]= torch.cuda.device_count() else: lowercase__ : Tuple= 1 if num_processes > 1: self.assertNotIn("epoch 0:" , _UpperCAmelCase ) self.assertIn("epoch 1:" , _UpperCAmelCase ) else: self.assertIn("epoch 0:" , _UpperCAmelCase ) self.assertIn("epoch 1:" , _UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ): lowercase__ : int= run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase ) lowercase__ : int= re.findall("({.+})" , _UpperCAmelCase ) lowercase__ : List[str]= [r for r in results if "accuracy" in r][-1] lowercase__ : Optional[int]= ast.literal_eval(_UpperCAmelCase ) self.assertGreaterEqual(results["accuracy"] , 0.75 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def UpperCAmelCase_ ( self ): '''simple docstring''' with 
tempfile.TemporaryDirectory() as tmpdir: lowercase__ : Union[str, Any]= F''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "tracking" ) ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
714
"""simple docstring""" def lowercase__(A ) ->list[int]: """simple docstring""" lowercase__ : List[str]= len(A ) for i in range(A ): for j in range(i + 1 , A ): if numbers[j] < numbers[i]: lowercase__, lowercase__ : List[str]= numbers[j], numbers[i] return numbers if __name__ == "__main__": a : Dict = input("""Enter numbers separated by a comma:\n""").strip() a : List[str] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
85
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ProphetNetTokenizer _UpperCamelCase = False def UpperCamelCase_ ( self ): super().setUp() lowerCamelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCamelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def UpperCamelCase_ ( self ,_lowerCAmelCase ): lowerCamelCase__ = """UNwant\u00E9d,running""" lowerCamelCase__ = """unwanted, running""" return input_text, output_text def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class(self.vocab_file ) lowerCamelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(_lowerCAmelCase ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) ,[9, 6, 7, 12, 10, 11] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
""" ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = BasicTokenizer(do_lower_case=_lowerCAmelCase ,never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase_ ( self ): lowerCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowerCamelCase__ = {} for i, token in enumerate(_lowerCAmelCase ): lowerCamelCase__ = i lowerCamelCase__ = WordpieceTokenizer(vocab=_lowerCAmelCase ,unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) ,[] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] ) @require_torch def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) lowerCamelCase__ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCamelCase__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02] lowerCamelCase__ = tokenizer(_lowerCAmelCase ,padding=_lowerCAmelCase ,return_tensors="""pt""" ) self.assertIsInstance(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) def UpperCamelCase_ ( self ): self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) 
self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase_ ( self ): self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase_ ( self ): self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) @slow def UpperCamelCase_ ( self ): lowerCamelCase__ = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) lowerCamelCase__ = tokenizer.encode("""sequence builders""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ) lowerCamelCase__ = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase ,_lowerCAmelCase ) assert encoded_sentence == text + [1_02] assert encoded_pair == text + [1_02] + text_a + [1_02]
50
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
50
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[str] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class SCREAMING_SNAKE_CASE__ ( _A ): lowercase__ = "deformable_detr" lowercase__ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=300 , __UpperCamelCase=1024 , __UpperCamelCase=6 , __UpperCamelCase=1024 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=1024 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=256 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=False , __UpperCamelCase=300 , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , __UpperCamelCase=0.2_5 , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) __a : Dict = CONFIG_MAPPING["resnet"](out_features=["""stage4"""] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __a : int = backbone_config.get("""model_type""" ) __a : Optional[int] = CONFIG_MAPPING[backbone_model_type] __a : Dict = config_class.from_dict(__lowerCamelCase ) __a : Tuple = use_timm_backbone __a : Optional[int] = backbone_config __a : str = num_channels __a : Tuple = num_queries __a : str = max_position_embeddings __a : Tuple = d_model __a : List[Any] = encoder_ffn_dim __a : Optional[int] = encoder_layers __a : Optional[Any] = encoder_attention_heads __a : List[str] = decoder_ffn_dim __a : Optional[int] = decoder_layers __a : Union[str, Any] = decoder_attention_heads __a : List[str] = dropout __a : List[Any] = attention_dropout __a : int = activation_dropout __a : Dict = activation_function __a : Dict = init_std __a : int = init_xavier_std __a : Dict = encoder_layerdrop __a : str = auxiliary_loss __a : List[Any] = position_embedding_type __a : List[str] = backbone __a : Optional[int] = use_pretrained_backbone __a : Optional[Any] = dilation # deformable attributes __a : Dict = num_feature_levels __a : Optional[Any] = encoder_n_points __a : List[Any] = decoder_n_points __a : Tuple = two_stage __a : Optional[int] = two_stage_num_proposals __a : str = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __a : str = class_cost __a : Union[str, Any] = bbox_cost __a : List[Any] = giou_cost # Loss coefficients __a : Optional[int] = mask_loss_coefficient __a : int = dice_loss_coefficient __a : Optional[Any] = bbox_loss_coefficient __a : int = giou_loss_coefficient __a : str = eos_coefficient __a : Dict = focal_alpha __a : List[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def __lowerCamelCase ( self ): 
'''simple docstring''' return self.encoder_attention_heads @property def __lowerCamelCase ( self ): '''simple docstring''' return self.d_model def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __a : Any = self.backbone_config.to_dict() __a : Any = self.__class__.model_type return output
717
'''simple docstring''' import warnings from functools import wraps from typing import Callable def _snake_case ( lowercase ) -> Callable: @wraps(lowercase ) def _inner_fn(*lowercase , **lowercase ): warnings.warn( (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , ) return fn(*lowercase , **lowercase ) return _inner_fn
697
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase : Dict ={ """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], """tokenization_xlm""": ["""XLMTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] =[ """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMForMultipleChoice""", """XLMForQuestionAnswering""", """XLMForQuestionAnsweringSimple""", """XLMForSequenceClassification""", """XLMForTokenClassification""", """XLMModel""", """XLMPreTrainedModel""", """XLMWithLMHeadModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int =[ """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMForMultipleChoice""", """TFXLMForQuestionAnsweringSimple""", """TFXLMForSequenceClassification""", """TFXLMForTokenClassification""", """TFXLMMainLayer""", """TFXLMModel""", """TFXLMPreTrainedModel""", """TFXLMWithLMHeadModel""", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, 
TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys _lowercase : Dict =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
364
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( snake_case__ ): def __init__( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : str=13 , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : List[Any]=True , lowerCamelCase : Dict=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : Optional[Any]=99 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Any=5 , lowerCamelCase : Tuple=4 , lowerCamelCase : Optional[int]=37 , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : Tuple=5_12 , lowerCamelCase : Union[str, Any]=16 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : Dict=0.02 , lowerCamelCase : Any=False , lowerCamelCase : int=True , lowerCamelCase : Optional[Any]="None" , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : int=None , ): lowerCamelCase_ : int = parent lowerCamelCase_ : List[Any] = batch_size lowerCamelCase_ : Dict = seq_length lowerCamelCase_ : Union[str, Any] = is_training lowerCamelCase_ : str = use_input_mask lowerCamelCase_ : int = use_token_type_ids lowerCamelCase_ : Tuple = use_labels lowerCamelCase_ : str = vocab_size lowerCamelCase_ : int = hidden_size lowerCamelCase_ : Optional[int] = num_hidden_layers lowerCamelCase_ : Optional[Any] = num_attention_heads 
lowerCamelCase_ : Any = intermediate_size lowerCamelCase_ : List[Any] = hidden_act lowerCamelCase_ : List[Any] = hidden_dropout_prob lowerCamelCase_ : List[Any] = attention_probs_dropout_prob lowerCamelCase_ : List[str] = max_position_embeddings lowerCamelCase_ : Tuple = type_vocab_size lowerCamelCase_ : str = type_sequence_label_size lowerCamelCase_ : Optional[int] = initializer_range lowerCamelCase_ : List[Any] = num_labels lowerCamelCase_ : str = num_choices lowerCamelCase_ : Dict = relative_attention lowerCamelCase_ : Optional[int] = position_biased_input lowerCamelCase_ : List[Any] = pos_att_type lowerCamelCase_ : Optional[int] = scope def __a ( self : List[Any] ): lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ : Dict = None if self.use_input_mask: lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCamelCase_ : str = None if self.use_token_type_ids: lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ : Union[str, Any] = None lowerCamelCase_ : List[Any] = None lowerCamelCase_ : str = None if self.use_labels: lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Tuple ): return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __a ( self : int ): lowerCamelCase_ : int = self.get_config() lowerCamelCase_ : Any = 3_00 return config def __a ( self : Optional[int] , lowerCamelCase : Dict ): self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __a ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : List[str] ): lowerCamelCase_ : str = DebertaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase_ : Optional[int] = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )[0] lowerCamelCase_ : Union[str, Any] = model(lowerCamelCase , token_type_ids=lowerCamelCase )[0] lowerCamelCase_ : List[Any] = model(lowerCamelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __a ( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Any ): lowerCamelCase_ : Any = DebertaForMaskedLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase_ : Any = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Tuple , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple ): lowerCamelCase_ : List[str] = self.num_labels lowerCamelCase_ : 
str = DebertaForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase_ : List[str] = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCamelCase ) def __a ( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] ): lowerCamelCase_ : List[Any] = self.num_labels lowerCamelCase_ : str = DebertaForTokenClassification(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase_ : List[str] = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : str ): lowerCamelCase_ : int = DebertaForQuestionAnswering(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCamelCase_ : List[Any] = model( lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self : int ): lowerCamelCase_ : Any = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) : Dict = config_and_inputs lowerCamelCase_ : str = 
{'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ): _a : str = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) _a : Any = ( { 'feature-extraction': DebertaModel, 'fill-mask': DebertaForMaskedLM, 'question-answering': DebertaForQuestionAnswering, 'text-classification': DebertaForSequenceClassification, 'token-classification': DebertaForTokenClassification, 'zero-shot': DebertaForSequenceClassification, } if is_torch_available() else {} ) _a : Optional[int] = True _a : List[str] = False _a : List[str] = False _a : List[str] = False _a : List[Any] = False def __a ( self : List[str] ): lowerCamelCase_ : List[Any] = DebertaModelTester(self ) lowerCamelCase_ : Any = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def __a ( self : Any ): self.config_tester.run_common_tests() def __a ( self : List[str] ): lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCamelCase ) def __a ( self : int ): lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase ) def __a ( self : Optional[int] ): lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase ) def __a ( self : str ): lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase ) def __a ( self : Union[str, Any] ): lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase ) @slow 
def __a ( self : Optional[int] ): for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): @unittest.skip(reason='Model not available yet' ) def __a ( self : str ): pass @slow def __a ( self : Dict ): lowerCamelCase_ : List[str] = DebertaModel.from_pretrained('microsoft/deberta-base' ) lowerCamelCase_ : Optional[Any] = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) lowerCamelCase_ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase_ : Dict = model(lowerCamelCase , attention_mask=lowerCamelCase )[0] # compare the actual values for a slice. lowerCamelCase_ : str = torch.tensor( [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
364
1
'''simple docstring''' import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowercase_ : Optional[int] = { '''bart''': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''bert''': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-base-cased-finetuned-mrpc''': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''dpr''': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''gpt2''': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlnet''': ( 
XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm''': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm-roberta''': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''transfo-xl''': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''openai-gpt''': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''roberta''': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''layoutlm''': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''roberta-large-mnli''': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''camembert''': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''flaubert''': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert''': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert-base-distilled-squad''': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert''': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert-visual-feature-encoder''': ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''ctrl''': ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''albert''': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''t5''': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''electra''': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''wav2vec2''': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]=False , lowercase_ : str=True ): if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" ) lowercase , lowercase , lowercase , lowercase = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models ) lowercase = config_class.from_json_file(lowercase_ ) lowercase = True lowercase = True print(F"""Building TensorFlow model from configuration: {config}""" ) lowercase = model_class(lowercase_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase = cached_file( lowercase_ , lowercase_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase = load_pytorch_checkpoint_in_tfa_model(lowercase_ , lowercase_ ) if compare_with_pt_model: lowercase = tf_model(tf_model.dummy_inputs , training=lowercase_ ) # build the network lowercase = torch.load(lowercase_ , map_location="""cpu""" ) lowercase = pt_model_class.from_pretrained( pretrained_model_name_or_path=lowercase_ , config=lowercase_ , state_dict=lowercase_ ) with torch.no_grad(): lowercase = pt_model(**pt_model.dummy_inputs ) lowercase = pto[0].numpy() lowercase = tfo[0].numpy() lowercase = np.amax(np.abs(np_pt - np_tf ) ) print(F"""Max absolute difference between models outputs {diff}""" ) assert diff <= 2E-2, F"""Error, model absolute 
difference is >2e-2: {diff}""" # Save pytorch-model print(F"""Save TensorFlow model to {tf_dump_path}""" ) tf_model.save_weights(lowercase_ , save_format="""h5""" ) def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : int , lowercase_ : Tuple=None , lowercase_ : Optional[int]=None , lowercase_ : int=False , lowercase_ : List[str]=False , lowercase_ : Dict=False , lowercase_ : List[Any]=False , ): if args_model_type is None: lowercase = list(MODEL_CLASSES.keys() ) else: lowercase = [args_model_type] for j, model_type in enumerate(lowercase_ , start=1 ): print("""=""" * 100 ) print(F""" Converting model type {j}/{len(lowercase_ )}: {model_type}""" ) print("""=""" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" ) lowercase , lowercase , lowercase , lowercase , lowercase = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(lowercase_ , lowercase_ ) , start=1 ): print("""-""" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" ) continue lowercase = model_shortcut_name elif only_convert_finetuned_models: print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" ) continue print( F""" Converting checkpoint {i}/{len(lowercase_ )}: {model_shortcut_name} - model_type {model_type}""" ) print("""-""" * 100 ) if config_shortcut_name in aws_config_map: lowercase = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models ) else: lowercase = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase = cached_file(lowercase_ , lowercase_ , force_download=not 
use_cached_models ) else: lowercase = model_shortcut_name if os.path.isfile(lowercase_ ): lowercase = """converted_model""" convert_pt_checkpoint_to_tf( model_type=lowercase_ , pytorch_checkpoint_path=lowercase_ , config_file=lowercase_ , tf_dump_path=os.path.join(lowercase_ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=lowercase_ , ) if remove_cached_files: os.remove(lowercase_ ) os.remove(lowercase_ ) if __name__ == "__main__": lowercase_ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.''' ) parser.add_argument( '''--model_type''', default=None, type=str, help=( f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ''' '''convert all the models from AWS.''' ), ) parser.add_argument( '''--pytorch_checkpoint_path''', default=None, type=str, help=( '''Path to the PyTorch checkpoint path or shortcut name to download from AWS. ''' '''If not given, will download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--config_file''', default=None, type=str, help=( '''The config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture. 
If not given and ''' '''--pytorch_checkpoint_path is not given or is a shortcut name ''' '''use the configuration associated to the shortcut name on the AWS''' ), ) parser.add_argument( '''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.''' ) parser.add_argument( '''--use_cached_models''', action='''store_true''', help='''Use cached models if possible instead of updating to latest checkpoint versions.''', ) parser.add_argument( '''--remove_cached_files''', action='''store_true''', help='''Remove pytorch models after conversion (save memory when converting in batches).''', ) parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''') lowercase_ : Union[str, Any] = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
653
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) lowercase_ : Tuple = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): lowercase = git.Repo(search_parent_directories=lowercase_ ) lowercase = { """repo_id""": str(lowercase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), } with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f: json.dump(lowercase_ , lowercase_ , indent=4 ) def SCREAMING_SNAKE_CASE ( lowercase_ : str ): if params.n_gpu <= 0: lowercase = 0 lowercase = -1 lowercase = True lowercase = False return assert torch.cuda.is_available() logger.info("""Initializing GPUs""" ) if params.n_gpu > 1: assert params.local_rank != -1 lowercase = int(os.environ["""WORLD_SIZE"""] ) lowercase = int(os.environ["""N_GPU_NODE"""] ) lowercase = int(os.environ["""RANK"""] ) # number of nodes / node ID lowercase = params.world_size // params.n_gpu_per_node lowercase = params.global_rank // params.n_gpu_per_node lowercase = True assert params.n_nodes == int(os.environ["""N_NODES"""] ) assert params.node_id == int(os.environ["""NODE_RANK"""] ) # local job (single GPU) else: assert params.local_rank == -1 lowercase = 1 lowercase = 0 lowercase = 0 lowercase = 0 lowercase = 1 lowercase = 1 lowercase = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowercase = params.node_id == 0 and params.local_rank == 0 lowercase = params.n_nodes > 1 # summary lowercase = F"""--- Global rank: {params.global_rank} 
- """ logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes ) logger.info(PREFIX + """Node ID : %i""" % params.node_id ) logger.info(PREFIX + """Local rank : %i""" % params.local_rank ) logger.info(PREFIX + """World size : %i""" % params.world_size ) logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node ) logger.info(PREFIX + """Master : %s""" % str(params.is_master ) ) logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) ) logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) ) logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("""Initializing PyTorch distributed""" ) torch.distributed.init_process_group( init_method="""env://""" , backend="""nccl""" , ) def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
653
1
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class snake_case__ ( lowercase_): '''simple docstring''' lowerCamelCase : str = "perceiver" def __init__( self , a__=2_56 , a__=12_80 , a__=7_68 , a__=1 , a__=26 , a__=8 , a__=8 , a__=None , a__=None , a__="kv" , a__=1 , a__=1 , a__="gelu" , a__=0.1 , a__=0.02 , a__=1e-12 , a__=True , a__=2_62 , a__=20_48 , a__=56 , a__=[3_68, 4_96] , a__=16 , a__=19_20 , a__=16 , a__=[1, 16, 2_24, 2_24] , **a__ , ) -> List[str]: '''simple docstring''' super().__init__(**a__ ) __snake_case :Tuple = num_latents __snake_case :Optional[Any] = d_latents __snake_case :Tuple = d_model __snake_case :List[Any] = num_blocks __snake_case :Optional[Any] = num_self_attends_per_block __snake_case :int = num_self_attention_heads __snake_case :int = num_cross_attention_heads __snake_case :int = qk_channels __snake_case :Tuple = v_channels __snake_case :Dict = cross_attention_shape_for_attention __snake_case :Any = self_attention_widening_factor __snake_case :Optional[int] = cross_attention_widening_factor __snake_case :Dict = hidden_act __snake_case :List[str] = attention_probs_dropout_prob __snake_case :int = initializer_range __snake_case :Tuple = layer_norm_eps __snake_case :List[Any] = use_query_residual # masked language modeling attributes __snake_case :Dict = vocab_size __snake_case :Dict = max_position_embeddings # image classification 
attributes __snake_case :Any = image_size # flow attributes __snake_case :Any = train_size # multimodal autoencoding attributes __snake_case :List[str] = num_frames __snake_case :Any = audio_samples_per_frame __snake_case :Tuple = samples_per_patch __snake_case :Tuple = output_shape class snake_case__ ( lowercase_): '''simple docstring''' @property def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __snake_case :int = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __snake_case :Any = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""inputs""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] ) @property def __lowercase ( self ) -> float: '''simple docstring''' return 1e-4 def __lowercase ( self , a__ , a__ = -1 , a__ = -1 , a__ = -1 , a__ = False , a__ = None , a__ = 3 , a__ = 40 , a__ = 40 , ) -> Mapping[str, Any]: '''simple docstring''' if isinstance(a__ , a__ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __snake_case :str = compute_effective_axis_dimension( a__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __snake_case :Union[str, Any] = preprocessor.num_special_tokens_to_add(a__ ) __snake_case :Union[str, Any] = compute_effective_axis_dimension( a__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a__ ) # Generate dummy inputs according to compute batch and sequence __snake_case :List[str] = [""" """.join(["""a"""] ) * seq_length] * batch_size __snake_case :int = dict(preprocessor(a__ , return_tensors=a__ ) ) __snake_case :Any = inputs.pop("""input_ids""" ) return inputs elif isinstance(a__ , a__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by 
ONNX __snake_case :str = compute_effective_axis_dimension(a__ , fixed_dimension=OnnxConfig.default_fixed_batch ) __snake_case :Union[str, Any] = self._generate_dummy_images(a__ , a__ , a__ , a__ ) __snake_case :Union[str, Any] = dict(preprocessor(images=a__ , return_tensors=a__ ) ) __snake_case :List[Any] = inputs.pop("""pixel_values""" ) return inputs else: raise ValueError( """Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
455
def UpperCamelCase ( snake_case__ : list ): '''simple docstring''' if not grid or not grid[0]: raise TypeError("""The grid does not contain the appropriate information""" ) for cell_n in range(1 ,len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] __snake_case :str = grid[0] for row_n in range(1 ,len(snake_case__ ) ): __snake_case :Optional[int] = grid[row_n] __snake_case :Optional[Any] = fill_row(snake_case__ ,snake_case__ ) __snake_case :Dict = grid[row_n] return grid[-1][-1] def UpperCamelCase ( snake_case__ : list ,snake_case__ : list ): '''simple docstring''' current_row[0] += row_above[0] for cell_n in range(1 ,len(snake_case__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] ,row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
455
1
"""simple docstring""" import torch from diffusers import DiffusionPipeline class UpperCamelCase (__snake_case ): def __init__( self :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Dict ) ->int: super().__init__() self.register_modules(unet=__magic_name__ , scheduler=__magic_name__ ) def __call__( self :Optional[Any] ) ->Optional[Any]: lowercase : Tuple = torch.randn( (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , ) lowercase : int = 1 lowercase : Dict = self.unet(__magic_name__ , __magic_name__ ).sample lowercase : List[str] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample lowercase : List[Any] = scheduler_output - scheduler_output + torch.ones_like(__magic_name__ ) return result
706
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def UpperCamelCase ( _A , _A , _A = 1 / sqrt(2 ) ) -> IIRFilter: lowercase : Optional[int] = tau * frequency / samplerate lowercase : Union[str, Any] = sin(_A ) lowercase : Tuple = cos(_A ) lowercase : Any = _sin / (2 * q_factor) lowercase : Any = (1 - _cos) / 2 lowercase : List[str] = 1 - _cos lowercase : int = 1 + alpha lowercase : Optional[int] = -2 * _cos lowercase : str = 1 - alpha lowercase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCamelCase ( _A , _A , _A = 1 / sqrt(2 ) ) -> IIRFilter: lowercase : Union[str, Any] = tau * frequency / samplerate lowercase : Dict = sin(_A ) lowercase : List[Any] = cos(_A ) lowercase : str = _sin / (2 * q_factor) lowercase : Any = (1 + _cos) / 2 lowercase : Dict = -1 - _cos lowercase : Tuple = 1 + alpha lowercase : Tuple = -2 * _cos lowercase : Any = 1 - alpha lowercase : int = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCamelCase ( _A , _A , _A = 1 / sqrt(2 ) ) -> IIRFilter: lowercase : Optional[int] = tau * frequency / samplerate lowercase : Optional[int] = sin(_A ) lowercase : Any = cos(_A ) lowercase : str = _sin / (2 * q_factor) lowercase : Optional[int] = _sin / 2 lowercase : Dict = 0 lowercase : Any = -ba lowercase : Any = 1 + alpha lowercase : Union[str, Any] = -2 * _cos lowercase : Any = 1 - alpha lowercase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCamelCase ( _A , _A , _A = 1 / sqrt(2 ) ) -> IIRFilter: lowercase : Optional[int] = tau * frequency / samplerate lowercase : List[Any] = sin(_A ) lowercase : Tuple = cos(_A ) lowercase : Any = _sin / (2 * q_factor) lowercase : Dict = 1 - alpha lowercase : int = -2 * _cos lowercase : Optional[Any] = 1 + alpha lowercase : Optional[Any] = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt 
def UpperCamelCase ( _A , _A , _A , _A = 1 / sqrt(2 ) , ) -> IIRFilter: lowercase : Optional[Any] = tau * frequency / samplerate lowercase : Optional[int] = sin(_A ) lowercase : Dict = cos(_A ) lowercase : Optional[Any] = _sin / (2 * q_factor) lowercase : int = 10 ** (gain_db / 40) lowercase : str = 1 + alpha * big_a lowercase : str = -2 * _cos lowercase : Optional[int] = 1 - alpha * big_a lowercase : Optional[Any] = 1 + alpha / big_a lowercase : Tuple = -2 * _cos lowercase : List[Any] = 1 - alpha / big_a lowercase : str = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCamelCase ( _A , _A , _A , _A = 1 / sqrt(2 ) , ) -> IIRFilter: lowercase : Optional[Any] = tau * frequency / samplerate lowercase : str = sin(_A ) lowercase : str = cos(_A ) lowercase : Optional[int] = _sin / (2 * q_factor) lowercase : List[Any] = 10 ** (gain_db / 40) lowercase : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos lowercase : Optional[Any] = (big_a + 1) + (big_a - 1) * _cos lowercase : List[Any] = (big_a - 1) - (big_a + 1) * _cos lowercase : Dict = (big_a - 1) + (big_a + 1) * _cos lowercase : int = 2 * sqrt(_A ) * alpha lowercase : Union[str, Any] = big_a * (pmc + aaa) lowercase : int = 2 * big_a * mpc lowercase : Optional[Any] = big_a * (pmc - aaa) lowercase : Tuple = ppmc + aaa lowercase : int = -2 * pmpc lowercase : Optional[int] = ppmc - aaa lowercase : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def UpperCamelCase ( _A , _A , _A , _A = 1 / sqrt(2 ) , ) -> IIRFilter: lowercase : Optional[Any] = tau * frequency / samplerate lowercase : Union[str, Any] = sin(_A ) lowercase : List[Any] = cos(_A ) lowercase : str = _sin / (2 * q_factor) lowercase : List[str] = 10 ** (gain_db / 40) lowercase : str = (big_a + 1) - (big_a - 1) * _cos lowercase : List[Any] = (big_a + 1) + (big_a - 1) * _cos lowercase : str = (big_a - 1) - (big_a + 1) * _cos lowercase : str = (big_a - 1) + (big_a + 1) * _cos lowercase : int = 2 
* sqrt(_A ) * alpha lowercase : int = big_a * (ppmc + aaa) lowercase : Optional[Any] = -2 * big_a * pmpc lowercase : Tuple = big_a * (ppmc - aaa) lowercase : Union[str, Any] = pmc + aaa lowercase : List[Any] = 2 * mpc lowercase : Optional[Any] = pmc - aaa lowercase : Tuple = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
348
0
from __future__ import annotations

import random

# Maximum size of the population.  Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` against `main_target`: count of positions with the same gene.

    Returns the item together with its (float) score.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at one random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of `child`."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed mutated children of `parent_1` with random mates from `population_score`.

    Fitter parents produce more children (capped at 10 crossovers, i.e. 20 children).
    """
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # mate is drawn from the best N_SELECTED entries of the scored population
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings over `genes` until one exactly matches `target`.

    :param target: string the evolution must reproduce
    :param genes: alphabet the population may use; must cover every char of `target`
    :param debug: print progress every 10 generations
    :return: (generation reached, total individuals evaluated, matching string)
    :raises ValueError: if N_POPULATION < N_SELECTED or `target` uses unknown genes
    """
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # (A thread pool could parallelise `evaluate`, but with a function this
        # cheap the serial loop is typically faster.)
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle.  If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
157
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper

import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname

import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary

from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

# indentation level used for every generated json file
json_indent = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"


def rewrite_dict_keys(d):
    """Rewrite a fairseq vocab dict into the convention used by the FSMT tokenizer.

    ``"w@@" -> "w"`` (BPE continuation marker stripped) and ``"w" -> "w</w>"``
    (end-of-word marker appended); the four special tokens keep their bare form.
    """
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v)
        for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq wmt19/wmt16 checkpoint into a transformers FSMT dump.

    Writes vocab files, merges, model/tokenizer configs and the pytorch weights
    into `pytorch_dump_folder_path`.

    :param fsmt_checkpoint_path: path to the fairseq checkpoint file (its dir must
        also contain the dicts and bpecodes)
    :param pytorch_dump_folder_path: output dir; created if missing
    """
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
157
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects when torch/transformers are missing.
    # NOTE(review): previously only KandinskyPipeline and KandinskyPriorPipeline
    # were imported here, leaving KandinskyImgaImgPipeline/KandinskyInpaintPipeline
    # undefined in the fallback path — confirm the dummy module exports all four.
    from ...utils.dummy_torch_and_transformers_objects import (
        KandinskyImgaImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
    )
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Each argument is a per-stage list (three stages by default); scalars apply
    to the whole model.  All values are stored verbatim as attributes.

    Args:
        num_channels: number of input image channels.
        patch_sizes / patch_stride / patch_padding: conv-embedding kernel,
            stride and padding per stage.
        embed_dim: embedding dimension per stage.
        num_heads: attention heads per stage.
        depth: number of transformer blocks per stage.
        mlp_ratio: MLP hidden-dim ratio per stage.
        attention_drop_rate / drop_rate / drop_path_rate: dropout rates.
        qkv_bias: whether q/k/v projections use a bias, per stage.
        cls_token: whether the stage appends a classification token.
        qkv_projection_method: projection type per stage (e.g. "dw_bn").
        kernel_qkv: kernel size of the q/k/v projection conv.
        padding_kv / stride_kv / padding_q / stride_q: conv projection geometry.
        initializer_range: stddev for weight init.
        layer_norm_eps: epsilon for LayerNorm.
    """

    model_type = "cvt"

    # NOTE: the mutable list defaults mirror the upstream API; they are never
    # mutated in place, only stored.
    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
76
1
"""Mahalanobis distance metric for the `datasets` metric API."""

import numpy as np

import datasets


_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\\'e}sir{\\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        """Declare metric metadata and the expected input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return the Mahalanobis distance of each row of `X` w.r.t. the reference.

        Both inputs must be 2D; the reference needs at least two rows so a
        covariance matrix can be estimated.
        """
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # singular covariance: fall back to the pseudo-inverse
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
72
import os

# Roman symbol -> value lookup shared by both directions of the conversion.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to an integer.

    Handles subtractive notation: a symbol smaller than its successor is
    subtracted (e.g. ``"XIV" -> 14``).  Returns 0 for an empty string.
    """
    if not numerals:  # guard: original indexed into an empty string
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to its minimal roman numeral form (e.g. 49 -> "XLIX")."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting every numeral
    in the input file in minimal form.
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
312
0
"""simple docstring""" import re import string import numpy as np import datasets __lowercase : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. 
Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowercase : List[str] = '\n' 
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase(datasets.Metric):
    """Exact-match metric: mean percentage of predictions equal to their references."""

    def _info(self):
        # `datasets.Metric` dispatches to `_info`/`_compute` by name; the original's
        # duplicated placeholder method names broke that contract.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": rate} with rate in [0.0, 100.0].

        Regexes in `regexes_to_ignore` are stripped from both sides first, then
        the optional case/punctuation/number normalizations are applied.
        """
        if regexes_to_ignore is not None:
            for pattern in regexes_to_ignore:
                predictions = [re.sub(pattern, "", x) for x in predictions]
                references = [re.sub(pattern, "", x) for x in references]

        # Always convert to arrays. The original converted only in the
        # `regexes_to_ignore is None` branch, so an *empty* regex list left
        # plain Python lists and `predictions == references` compared the two
        # lists as whole objects instead of elementwise.
        predictions = np.asarray(predictions)
        references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
705
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = tempfile.mkdtemp() # fmt: off lowerCamelCase_ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on lowerCamelCase_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowerCamelCase_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] lowerCamelCase_ = {'''unk_token''': '''<unk>'''} lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase__ ) ) lowerCamelCase_ = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073], '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711], } lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.image_processor_file , 
'''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> str: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> Dict: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowerCAmelCase ( self , **UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ ) lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ ) 
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) lowerCamelCase_ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(UpperCamelCase__ , return_tensors='''np''' ) lowerCamelCase_ = processor(images=UpperCamelCase__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = 
CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase_ = '''lower newer''' lowerCamelCase_ = processor(text=UpperCamelCase__ ) lowerCamelCase_ = tokenizer(UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase_ = '''lower newer''' lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowerCAmelCase ( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase_ = processor.batch_decode(UpperCamelCase__ ) lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = CLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) lowerCamelCase_ = '''lower newer''' lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
66
0
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of `text` and their difference.

    Entropy is computed only over lowercase letters and the space character;
    results are printed rounded to the nearest whole bit.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # First-order entropy: -sum(p * log2(p)) over single characters.
    all_sum = sum(single_char_strings.values())
    my_fir_sum = 0.0
    for ch in my_alphas:
        if ch in single_char_strings:
            prob = single_char_strings[ch] / all_sum
            my_fir_sum += prob * math.log2(prob)
    print(f"{round(-1 * my_fir_sum):.1f}")

    # Second-order entropy over adjacent two-character sequences.
    # (The original shadowed the outer loop variable with the inner one,
    # so only doubled characters like "aa" were ever counted.)
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                prob = int(two_char_strings[sequence]) / all_sum
                my_sec_sum += prob * math.log2(prob)
    print(f"{round(-1 * my_sec_sum):.1f}")

    # Difference between the two entropies (conditional entropy estimate).
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """Count single characters and adjacent two-character sequences in `text`.

    The last character and a leading " " + first-character bigram are counted
    separately so that every position contributes exactly once.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests; see the commented sample below for manual use."""
    import doctest

    doctest.testmod()
    # Sample usage (kept from the original, commented out):
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. ..."
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
47
"""Tests for the `check_dummies` repo utility (dummy-object file generation)."""

import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the original assigned this path to a throwaway module constant;
# restoring the attribute patch per the comment above — confirm the attribute
# name against the local check_dummies module.
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class snake_case(unittest.TestCase):
    def test_find_backend(self):
        """find_backend extracts the backend name(s) from an init guard line."""
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        """read_init returns a backend -> object-names mapping for the repo init."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        """create_dummy_object emits a placeholder constant/function/class body."""
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = (
            "\n"
            "class FakeClass(metaclass=DummyObject):\n"
            "    _backends = 'torch'\n"
            "\n"
            "    def __init__(self, *args, **kwargs):\n"
            "        requires_backends(self, 'torch')\n"
        )
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        """create_dummy_files assembles a complete per-backend dummy module."""
        expected_dummy_pytorch_file = (
            "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
            "from ..utils import DummyObject, requires_backends\n"
            "\n"
            "\n"
            "CONSTANT = None\n"
            "\n"
            "\n"
            "def function(*args, **kwargs):\n"
            '    requires_backends(function, ["torch"])\n'
            "\n"
            "\n"
            "class FakeClass(metaclass=DummyObject):\n"
            '    _backends = ["torch"]\n'
            "\n"
            "    def __init__(self, *args, **kwargs):\n"
            '        requires_backends(self, ["torch"])\n'
        )
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
675
0
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a ( _lowerCamelCase ): """simple docstring""" UpperCAmelCase = (KDPMaDiscreteScheduler,) UpperCAmelCase = 1_0 def UpperCamelCase ( self: Dict , **UpperCamelCase: Union[str, Any] ): """simple docstring""" A__ = { """num_train_timesteps""": 11_00, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**UpperCamelCase ) return config def UpperCamelCase ( self: List[str] ): """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def UpperCamelCase ( self: List[str] ): """simple docstring""" for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCamelCase , beta_end=UpperCamelCase ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCamelCase ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type="""v_prediction""" ) A__ = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) A__ = model(UpperCamelCase , UpperCamelCase ) A__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCamelCase ) ) A__ = 
torch.mean(torch.abs(UpperCamelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2 assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_002 ) < 1e-3 def UpperCamelCase ( self: str ): """simple docstring""" if torch_device == "mps": return A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) A__ = model(UpperCamelCase , UpperCamelCase ) A__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCamelCase ) ) A__ = torch.mean(torch.abs(UpperCamelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3 def UpperCamelCase ( self: Any ): """simple docstring""" if torch_device == "mps": return A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase ) A__ = self.dummy_model() A__ = self.dummy_sample_deter.to(UpperCamelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: A__ = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) A__ = model(UpperCamelCase , UpperCamelCase ) A__ = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(UpperCamelCase ) ) A__ = torch.mean(torch.abs(UpperCamelCase ) ) if 
str(UpperCamelCase ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1e-2 assert abs(result_mean.item() - 0.0_266 ) < 1e-3
500
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class a ( _lowerCamelCase ): """simple docstring""" def UpperCamelCase ( self: Dict ): """simple docstring""" A__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(UpperCamelCase , """num_attention_heads""" ) ) self.parent.assertTrue(hasattr(UpperCamelCase , """num_encoder_blocks""" ) ) class a : """simple docstring""" def __init__( self: str , UpperCamelCase: Dict , UpperCamelCase: int=13 , UpperCamelCase: Optional[int]=64 , UpperCamelCase: List[Any]=3 , UpperCamelCase: List[Any]=4 , UpperCamelCase: Optional[Any]=[2, 2, 2, 2] , UpperCamelCase: Any=[8, 4, 2, 1] , UpperCamelCase: Optional[int]=[16, 32, 64, 1_28] , UpperCamelCase: str=[1, 4, 8, 16] , UpperCamelCase: Dict=[1, 2, 4, 8] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Tuple=0.1 , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Tuple=0.02 , UpperCamelCase: int=3 , UpperCamelCase: str=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = num_encoder_blocks A__ = sr_ratios A__ = depths A__ 
= hidden_sizes A__ = downsampling_rates A__ = num_attention_heads A__ = is_training A__ = use_labels A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = num_labels A__ = scope def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self: Optional[int] ): """simple docstring""" return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def UpperCamelCase ( self: Tuple , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: int ): """simple docstring""" A__ = SegformerModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def UpperCamelCase ( self: List[str] , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: List[str] ): """simple docstring""" A__ = self.num_labels A__ = SegformerForSemanticSegmentation(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) A__ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual( 
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: str , UpperCamelCase: Tuple ): """simple docstring""" A__ = 1 A__ = SegformerForSemanticSegmentation(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase ) A__ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertGreater(result.loss , 0.0 ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) UpperCAmelCase = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase ( self: Any ): """simple docstring""" A__ = SegformerModelTester(self ) A__ = SegformerConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase ( self: Any ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self: Dict ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase ( self: int ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase ) def UpperCamelCase ( self: Dict ): 
"""simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def UpperCamelCase ( self: List[str] ): """simple docstring""" pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def UpperCamelCase ( self: str ): """simple docstring""" pass def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions A__ = sum(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # verify the first attentions (first block, first layer) A__ = (self.model_tester.image_size // 4) ** 2 A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( 
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) A__ = (self.model_tester.image_size // 32) ** 2 A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) A__ = len(UpperCamelCase ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) self.assertEqual(out_len + 1 , len(UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # verify the first attentions (first block, first layer) A__ = (self.model_tester.image_size // 4) ** 2 A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" def check_hidden_states_output(UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Dict ): A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.hidden_states A__ = self.model_tester.num_encoder_blocks self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: A__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" if not self.model_tester.is_training: return A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: if model_class in get_values(UpperCamelCase ): continue A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.train() A__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) A__ = model(**UpperCamelCase ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" pass @slow def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = SegformerModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def _snake_case ( ): A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class a ( unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase ( self: Any ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase ) A__ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( UpperCamelCase ) A__ = prepare_img() A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) A__ = encoded_inputs.pixel_values.to(UpperCamelCase ) with torch.no_grad(): A__ = model(UpperCamelCase ) A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) 
self.assertEqual(outputs.logits.shape , UpperCamelCase ) A__ = torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase ) A__ = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(UpperCamelCase ) A__ = prepare_img() A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) A__ = encoded_inputs.pixel_values.to(UpperCamelCase ) with torch.no_grad(): A__ = model(UpperCamelCase ) A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) A__ = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-1 ) ) @slow def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = SegformerImageProcessor( image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase ) A__ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( UpperCamelCase ) A__ = prepare_img() A__ = 
image_processor(images=UpperCamelCase , return_tensors="""pt""" ) A__ = encoded_inputs.pixel_values.to(UpperCamelCase ) with torch.no_grad(): A__ = model(UpperCamelCase ) A__ = outputs.logits.detach().cpu() A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase , target_sizes=[(5_00, 3_00)] ) A__ = torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , UpperCamelCase ) A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase ) A__ = torch.Size((1_28, 1_28) ) self.assertEqual(segmentation[0].shape , UpperCamelCase )
500
1
"""Fetch post data from a subreddit via Reddit's public JSON endpoint."""
from __future__ import annotations

import requests

# Fields Reddit exposes on a post; only these may be requested via `wanted_data`.
# (.split() makes the internal whitespace/newlines irrelevant.)
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Return data for up to ``limit`` posts of a subreddit.

    :param subreddit: subreddit name to query
    :param limit: number of posts to fetch
    :param age: listing to read, e.g. "new", "top" or "hot"
    :param wanted_data: subset of ``valid_terms`` fields to extract per post;
        when empty, the raw child objects are returned instead
    :raises ValueError: if ``wanted_data`` contains a field not in ``valid_terms``
    :raises requests.HTTPError: when Reddit answers 429 (rate limited)
    """
    wanted_data = wanted_data or []
    # Reject unknown fields up front with a readable message.
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        # No field filter requested: hand back the raw post wrappers.
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        # Keep only the requested fields for each post.
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
53
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class _lowerCAmelCase(SchedulerCommonTest):
    """Tests for the IPNDM scheduler (save/load round-trips, step shapes, full loops)."""

    # Names required by SchedulerCommonTest helpers (self.scheduler_classes /
    # self.forward_default_kwargs are read by the methods below).
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via kwargs."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Verify a save_config/from_pretrained round-trip produces identical steps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                # pick a timestep from the middle of the schedule
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            # IPNDM steps twice per timestep, so compare two consecutive steps.
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs; the common-test version does not apply here.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Verify forward kwargs survive a save/load round-trip."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run the scheduler over all timesteps (twice per step, as IPNDM requires)."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """Output shape of step() must match the input sample shape."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
654
0
"""simple docstring""" from __future__ import annotations import math def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> Dict: if len(UpperCamelCase_ ) != 2 or len(a[0] ) != 2 or len(UpperCamelCase_ ) != 2 or len(b[0] ) != 2: raise Exception("""Matrices are not 2x2""" ) __SCREAMING_SNAKE_CASE = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> int: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase_ ) ) ] def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase_ ) ) ] def _lowerCAmelCase ( UpperCamelCase_ ) -> Any: if len(UpperCamelCase_ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("""Odd matrices are not supported!""" ) __SCREAMING_SNAKE_CASE = len(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = matrix_length // 2 __SCREAMING_SNAKE_CASE = [[a[i][j] for j in range(UpperCamelCase_ , UpperCamelCase_ )] for i in range(UpperCamelCase_ )] __SCREAMING_SNAKE_CASE = [ [a[i][j] for j in range(UpperCamelCase_ , UpperCamelCase_ )] for i in range(UpperCamelCase_ , UpperCamelCase_ ) ] __SCREAMING_SNAKE_CASE = [[a[i][j] for j in range(UpperCamelCase_ )] for i in range(UpperCamelCase_ )] __SCREAMING_SNAKE_CASE = [[a[i][j] for j in range(UpperCamelCase_ )] for i in range(UpperCamelCase_ , UpperCamelCase_ )] return top_left, top_right, bot_left, bot_right def _lowerCAmelCase ( UpperCamelCase_ ) -> Tuple: return len(UpperCamelCase_ ), len(matrix[0] ) def _lowerCAmelCase ( UpperCamelCase_ ) -> Optional[int]: print("""\n""".join(str(UpperCamelCase_ ) for line in matrix ) ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ) -> Dict: if matrix_dimensions(UpperCamelCase_ ) 
== (2, 2): return default_matrix_multiplication(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split_matrix(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = split_matrix(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = actual_strassen(UpperCamelCase_ , matrix_subtraction(UpperCamelCase_ , UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = actual_strassen(matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = actual_strassen(matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = actual_strassen(UpperCamelCase_ , matrix_subtraction(UpperCamelCase_ , UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = actual_strassen(matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) , matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = actual_strassen(matrix_subtraction(UpperCamelCase_ , UpperCamelCase_ ) , matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = actual_strassen(matrix_subtraction(UpperCamelCase_ , UpperCamelCase_ ) , matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = matrix_addition(matrix_subtraction(matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) , UpperCamelCase_ ) # construct the new matrix from our 4 quadrants __SCREAMING_SNAKE_CASE = [] for i in range(len(UpperCamelCase_ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(UpperCamelCase_ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def _lowerCAmelCase 
( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]: if matrix_dimensions(UpperCamelCase_ )[1] != matrix_dimensions(UpperCamelCase_ )[0]: __SCREAMING_SNAKE_CASE = ( """Unable to multiply these matrices, please check the dimensions.\n""" f"Matrix A: {matrixa}\n" f"Matrix B: {matrixa}" ) raise Exception(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = matrix_dimensions(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = matrix_dimensions(UpperCamelCase_ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __SCREAMING_SNAKE_CASE = max(*UpperCamelCase_ , *UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = int(math.pow(2 , math.ceil(math.loga(UpperCamelCase_ ) ) ) ) __SCREAMING_SNAKE_CASE = matrixa __SCREAMING_SNAKE_CASE = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , UpperCamelCase_ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase_ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase_ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __SCREAMING_SNAKE_CASE = actual_strassen(UpperCamelCase_ , UpperCamelCase_ ) # Removing the additional zeros for i in range(0 , UpperCamelCase_ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase_ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": __magic_name__ = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __magic_name__ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
717
"""simple docstring""" def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): return int((input_a, input_a).count(0 ) == 0 ) def _lowerCAmelCase ( ): assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
248
0
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _a = """\ """ _a = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ _a = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... 
input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase( datasets.Metric ): def UpperCAmelCase ( self) -> Any: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string'''), }) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def UpperCAmelCase ( self , __a , __a , __a = 16 , __a = True , __a=None) -> Dict: '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _UpperCamelCase = '''cuda''' else: _UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCamelCase = AutoModelForCausalLM.from_pretrained(__a) _UpperCamelCase = model.to(__a) _UpperCamelCase = AutoTokenizer.from_pretrained(__a) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCamelCase = list(tokenizer.special_tokens_map_extended.values()) # check that the model already has at least one special token defined assert ( len(__a) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]}) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCamelCase = model.config.max_length - 1 else: _UpperCamelCase = model.config.max_length _UpperCamelCase = tokenizer( __a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , return_tensors='''pt''' , return_attention_mask=__a , ).to(__a) _UpperCamelCase = encodings['''input_ids'''] _UpperCamelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." 
_UpperCamelCase = [] _UpperCamelCase = CrossEntropyLoss(reduction='''none''') for start_index in logging.tqdm(range(0 , len(__a) , __a)): _UpperCamelCase = min(start_index + batch_size , len(__a)) _UpperCamelCase = encoded_texts[start_index:end_index] _UpperCamelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(__a) _UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1) _UpperCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(__a), attn_mask] , dim=1) _UpperCamelCase = encoded_batch with torch.no_grad(): _UpperCamelCase = model(__a , attention_mask=__a).logits _UpperCamelCase = out_logits[..., :-1, :].contiguous() _UpperCamelCase = labels[..., 1:].contiguous() _UpperCamelCase = attn_mask[..., 1:].contiguous() _UpperCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2) , __a) * shift_attention_mask_batch).sum(1) / shift_attention_mask_batch.sum(1)) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__a)}
19
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for Data2VecAudio models.

    Holds the feature-extractor (conv stack), transformer, SpecAugment
    masking, CTC, adapter and XVector hyper-parameters.
    """

    # `model_type` is required by the PretrainedConfig machinery.
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv-layer specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the convolutional feature extractor.
        return math.prod(self.conv_stride)
148
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Map of submodule name -> public names, consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Each optional-dependency block only registers its submodule when the
# backend is installed; otherwise the names are simply unavailable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy backends import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
# Integration tests for the `accelerate` command-line interface:
# `accelerate launch`, `accelerate test` and `accelerate tpu-config`.
import inspect
import os
import unittest
from pathlib import Path

import torch

import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command


class __snake_case ( unittest.TestCase ):
    # NOTE(review): every class attribute below is bound to the same name
    # `__lowerCAmelCase` (each assignment overwrites the previous one), while
    # the methods read `mod_file`, `cls.config_path`, `cls.changed_path`,
    # `self.base_cmd`, `self.test_file_path` and `self.test_config_path`,
    # none of which are ever defined — the intended attribute names need to be
    # restored before this suite can run.
    __lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
    __lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
    __lowerCAmelCase : Tuple = ['accelerate', 'launch']
    __lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
    __lowerCAmelCase : List[str] = 'default_config.yaml'
    __lowerCAmelCase : List[Any] = config_folder / config_file
    __lowerCAmelCase : str = config_folder / '_default_config.yaml'
    __lowerCAmelCase : Optional[int] = Path('tests/test_configs' )

    @classmethod
    def lowerCAmelCase__ ( cls):
        # Stash any pre-existing user config out of the way for the test run.
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def lowerCAmelCase__ ( cls):
        # Restore the stashed user config after the test run.
        # NOTE(review): this shadows the classmethod above (identical name),
        # so only one of the setUpClass/tearDownClass behaviors survives.
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def lowerCAmelCase__ ( self):
        # Smoke-test `accelerate launch`, adding --multi_gpu when >1 GPU.
        SCREAMING_SNAKE_CASE_ = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]  # NOTE(review): `cmd` is never assigned here
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # Re-run the launch smoke test once per checked-in config file.
        # NOTE(review): `_A` is undefined — presumably the loop variable
        # `config` was intended.
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=_A):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())

    def lowerCAmelCase__ ( self):
        # `accelerate test` should run end-to-end with the default config.
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())


class __snake_case ( unittest.TestCase ):
    # Tests for `accelerate tpu-config`: each test builds a CLI invocation in
    # --debug mode and asserts on the gcloud command that would be executed.
    # NOTE(review): same obfuscation problems as the class above — duplicated
    # attribute and method names, and `_A` used where `True` (for
    # `return_stdout`) and the captured `output` were presumably intended.
    __lowerCAmelCase : Optional[Any] = 'test-tpu'
    __lowerCAmelCase : str = 'us-central1-a'
    __lowerCAmelCase : Union[str, Any] = 'ls'
    __lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
    __lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
    __lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
    __lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'

    def lowerCAmelCase__ ( self):
        # Command supplied directly on the CLI, no config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # Command on the CLI plus an old-format (0.12.0) config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # Commands come entirely from the latest-format config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # A CLI --command overrides the commands in the config file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # Multiple --command flags are chained with `;`.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # Commands can be read from a script file via --command_file.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # --command_file combined with explicit zone/name and an old config.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # --install_accelerate prepends a `pip install accelerate -U` step.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            _A , )

    def lowerCAmelCase__ ( self):
        # Pinning --accelerate_version installs that exact release instead.
        SCREAMING_SNAKE_CASE_ = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ] ,
            return_stdout=_A , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,
            _A , )
620
0
"""Lazy-import initializer for the RoFormer model family.

Framework-specific submodules (PyTorch, TensorFlow, Flax) are only imported
on first attribute access via `_LazyModule`.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps each submodule to the public names it exports.
# NOTE(review): the `_LazyModule(...)` call at the bottom passes
# `_import_structure`, but this dict is bound to `__lowerCAmelCase`, and the
# optional branches below REBIND `__lowerCAmelCase` to plain lists instead of
# inserting keys into the dict — confirm the intended variable names.
__lowerCAmelCase : List[str] = {
    '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
    '''tokenization_roformer''': ['''RoFormerTokenizer'''],
}

# Fast (Rust-backed) tokenizer, only when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Any = ['''RoFormerTokenizerFast''']

# PyTorch model classes, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : List[Any] = [
        '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoFormerForCausalLM''',
        '''RoFormerForMaskedLM''',
        '''RoFormerForMultipleChoice''',
        '''RoFormerForQuestionAnswering''',
        '''RoFormerForSequenceClassification''',
        '''RoFormerForTokenClassification''',
        '''RoFormerLayer''',
        '''RoFormerModel''',
        '''RoFormerPreTrainedModel''',
        '''load_tf_weights_in_roformer''',
    ]

# TensorFlow model classes, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : List[str] = [
        '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFRoFormerForCausalLM''',
        '''TFRoFormerForMaskedLM''',
        '''TFRoFormerForMultipleChoice''',
        '''TFRoFormerForQuestionAnswering''',
        '''TFRoFormerForSequenceClassification''',
        '''TFRoFormerForTokenClassification''',
        '''TFRoFormerLayer''',
        '''TFRoFormerModel''',
        '''TFRoFormerPreTrainedModel''',
    ]

# Flax model classes, only when JAX/Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[Any] = [
        '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FlaxRoFormerForMaskedLM''',
        '''FlaxRoFormerForMultipleChoice''',
        '''FlaxRoFormerForQuestionAnswering''',
        '''FlaxRoFormerForSequenceClassification''',
        '''FlaxRoFormerForTokenClassification''',
        '''FlaxRoFormerModel''',
        '''FlaxRoFormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy
    # module below is used instead.
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy frameworks load on
    # demand. NOTE(review): `_import_structure` is never defined in this file —
    # see the note on `__lowerCAmelCase` above.
    __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
'''simple docstring''' def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if height >= 1: move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) move_disk(lowerCamelCase_ , lowerCamelCase_ ) move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" print("moving disk from" , lowerCamelCase_ , "to" , lowerCamelCase_ ) def UpperCAmelCase_ ( ): """simple docstring""" lowerCAmelCase__ : List[str] = int(input("Height of hanoi: " ).strip() ) move_tower(lowerCamelCase_ , "A" , "B" , "C" ) if __name__ == "__main__": main()
378
0
"""Nagel–Schreckenberg cellular-automaton traffic simulation on a circular road.

Fixes over the previous revision: all four functions were bound to a single
name (so only the last survived), the internal calls to ``get_distance`` and
``update`` referenced names that were never defined, and the per-cell
assignments into ``highway`` had been collapsed into throwaway local bindings.
"""
from random import randint, random


def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    """Build the initial highway state: a list with one row of cells.

    A cell holds a car's speed, or -1 when empty. Cars are placed every
    ``frequency`` cells (or at random gaps when ``random_frequency``), with
    ``initial_speed`` (or a random speed when ``random_speed``).

    >>> construct_highway(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # -1 marks an empty cell
    i = 0
    initial_speed = max(initial_speed, 0)  # speeds are non-negative
    while i < number_of_cells:
        # Place a car with either the fixed or a random initial speed.
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed
        # Gap to the next car: fixed frequency or a random distance.
        i += randint(1, max_speed * 2) if random_frequency else frequency
    return highway


def get_distance(highway_now, car_index):
    """Return the number of empty cells in front of the car at ``car_index``.

    The road is circular: if no car is found before the end, the scan wraps
    around to the start of the highway.

    >>> get_distance([6, -1, 6, -1, 0], 2)
    1
    """
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # found the next car
            return distance
        distance += 1
    # Reached the end of the road: continue counting from the start (wrap).
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed):
    """One Nagel–Schreckenberg step: return each car's next speed (in place).

    Rules per car: accelerate by 1 (capped at ``max_speed``), brake so as not
    to reach the car ahead, then randomly slow down by 1 with ``probability``.

    >>> update([-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, 3], 0.0, 5)
    [-1, -1, -1, -1, -1, 3, -1, -1, -1, -1, 4]
    """
    number_of_cells = len(highway_now)
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Rule 1: accelerate by one, capped at the speed limit.
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Rule 2: never drive into the car ahead.
            gap = get_distance(highway_now, car_index) - 1
            next_highway[car_index] = min(next_highway[car_index], gap)
            # Rule 3: random slowdown models driver behavior.
            if random() < probability:
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed):
    """Run ``number_of_update`` steps, appending each new state to ``highway``.

    After speeds are updated, every car advances by its speed, wrapping around
    the circular road.
    """
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Move the car forward by its speed on the circular road.
                real_next_speeds[(car_index + speed) % number_of_cells] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
449
"""Manim animation: loading an empty model skeleton into memory (HF docs)."""
from manim import *


class __snake_case ( a__):
    # NOTE(review): the base class `a__` is never defined in this file
    # (presumably a manim `Scene`), every local is bound to the same name
    # `lowerCamelCase`, and later statements reference names (`mem`, `cpu`,
    # `gpu`, `model`, `key`, `key_text`, `step_a`, `rect`,
    # `cpu_left_col_base`, `cpu_target`, `cpu_targs`, `first_animations`,
    # `second_animations`, `A`) that are never bound — this scene cannot run
    # as-is; the intended local names need to be restored.

    def UpperCAmelCase_ ( self ):
        """Construct and play the 'empty model skeleton in memory' animation."""
        # Build the CPU memory block: two columns of six cells plus a label.
        lowerCamelCase : Optional[int] = Rectangle(height=0.5, width=0.5 )
        lowerCamelCase : List[Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
        lowerCamelCase : List[str] = [mem.copy() for i in range(6 )]
        lowerCamelCase : List[Any] = [mem.copy() for i in range(6 )]
        lowerCamelCase : str = VGroup(*A ).arrange(A, buff=0 )
        lowerCamelCase : Any = VGroup(*A ).arrange(A, buff=0 )
        lowerCamelCase : Dict = VGroup(A, A ).arrange(A, buff=0 )
        lowerCamelCase : str = Text('CPU', font_size=24 )
        lowerCamelCase : int = Group(A, A ).arrange(A, buff=0.5, aligned_edge=A )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(A )
        # GPU block: a single memory cell with its label.
        lowerCamelCase : Optional[int] = [mem.copy() for i in range(1 )]
        lowerCamelCase : Union[str, Any] = VGroup(*A ).arrange(A, buff=0 )
        lowerCamelCase : Optional[Any] = Text('GPU', font_size=24 )
        lowerCamelCase : Tuple = Group(A, A ).arrange(A, buff=0.5, aligned_edge=A )
        gpu.align_to(A, A )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(A )
        # Model block: six memory cells with its label.
        lowerCamelCase : Optional[int] = [mem.copy() for i in range(6 )]
        lowerCamelCase : Optional[Any] = VGroup(*A ).arrange(A, buff=0 )
        lowerCamelCase : Any = Text('Model', font_size=24 )
        lowerCamelCase : Tuple = Group(A, A ).arrange(A, buff=0.5, aligned_edge=A )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(A, run_time=1 ),
            Create(A, run_time=1 ),
            Create(A, run_time=1 ),
        )
        # Caption and legend.
        lowerCamelCase : str = MarkupText(
            F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''',
            font_size=24,
        )
        lowerCamelCase : Any = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCamelCase : Tuple = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''',
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(A, run_time=2.5 ), Write(A ), Write(A ) )
        self.add(A )
        # Animate one highlight rectangle per model cell flying into the CPU.
        lowerCamelCase : str = []
        lowerCamelCase : Optional[int] = []
        lowerCamelCase : Optional[Any] = []
        for i, rect in enumerate(A ):
            lowerCamelCase : List[str] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(A, opacity=0.7 )
            cpu_target.move_to(A )
            cpu_target.generate_target()
            lowerCamelCase : int = 0.46 / 4
            lowerCamelCase : Optional[int] = 0.46 / 3
            if i == 0:
                # First rectangle anchors to the CPU column's bottom-left corner.
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=A )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                # Fourth rectangle starts the second column.
                cpu_target.target.next_to(cpu_targs[0].target, direction=A, buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=A, buff=0.0 )
            cpu_targs.append(A )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A ) )
            second_animations.append(MoveToTarget(A, run_time=1.5 ) )
        self.play(*A )
        self.play(*A )
        self.wait()
449
1
# Unit tests for `transformers.HfArgumentParser`: dataclass-driven CLI
# argument parsing, including enums, literals, lists, optionals, booleans,
# and parsing from dicts / JSON / YAML files.
#
# NOTE(review): this file carries heavy identifier obfuscation damage —
# every dataclass/test class is named `__SCREAMING_SNAKE_CASE`, every field
# is named `A`, every method `__lowerCamelCase`, locals are all `lowercase`,
# and call sites reference names (`default`, `parser`, `expected`, `example`,
# `args`, `enum_ex`, `xx`, `yy`, `a`, `b`, `dataclass_types`, `list_field`,
# `is_python_no_less_than_3_10`, `A__`, `temp_local_path`) that are never
# bound under those names. The original descriptive names need to be restored
# before this suite can import and run.
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__a = sys.version_info >= (3, 10)


def __lowercase ( _UpperCamelCase=None, _UpperCamelCase=None ) ->Optional[Any]:
    """Shortcut for a dataclass list field with a default value and metadata.

    NOTE(review): both parameters share the same name (a SyntaxError) and the
    lambda closes over an undefined `default` — originally
    `list_field(default=None, metadata=None)`.
    """
    return field(default_factory=lambda: default, metadata=_UpperCamelCase )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Basic example: four required fields of different scalar types.
    A : int
    A : float
    A : str
    A : bool


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Fields with defaults, one carrying help metadata.
    A : int = 42
    A : str = field(default='toto' , metadata={'help': 'help message'} )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Boolean fields: plain default-False, default-True, and optional.
    A : bool = False
    A : bool = True
    A : Optional[bool] = None


class __SCREAMING_SNAKE_CASE ( A__ ):
    # String-valued enum of choices.
    A : str = 'titi'
    A : List[Any] = 'toto'


class __SCREAMING_SNAKE_CASE ( A__ ):
    # Enum mixing string and integer values.
    A : List[str] = 'titi'
    A : str = 'toto'
    A : List[str] = 42


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Enum-typed field initialized from its string value in __post_init__.
    A : BasicEnum = "toto"

    def __lowerCamelCase ( self ):
        lowercase : str = BasicEnum(self.foo )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Mixed-type enum field, likewise normalized in __post_init__.
    A : MixedTypeEnum = "toto"

    def __lowerCamelCase ( self ):
        lowercase : Dict = MixedTypeEnum(self.foo )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Optional scalar and list fields (classic typing.Optional spellings).
    A : Optional[int] = None
    A : Optional[float] = field(default=A__ , metadata={'help': 'help message'} )
    A : Optional[str] = None
    A : Optional[List[str]] = list_field(default=[] )
    A : Optional[List[int]] = list_field(default=[] )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # List fields with assorted defaults.
    A : List[int] = list_field(default=[] )
    A : List[int] = list_field(default=[1, 2, 3] )
    A : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    A : List[float] = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Required (no-default) fields, including a required enum.
    A : List[int] = field()
    A : str = field()
    A : BasicEnum = field()

    def __lowerCamelCase ( self ):
        lowercase : Tuple = BasicEnum(self.required_enum )


@dataclass
class __SCREAMING_SNAKE_CASE :
    # Same annotations but written as string literals (forward references).
    A : int
    A : "BasicEnum" = field()
    A : "Optional[bool]" = None
    A : "str" = field(default='toto' , metadata={'help': 'help message'} )
    A : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )


if is_python_no_less_than_3_10:
    # PEP 604 `X | None` variants, only constructible on Python >= 3.10.
    # NOTE(review): the guard flag was bound to `__a` above, not to this name.

    @dataclass
    class __SCREAMING_SNAKE_CASE :
        A : bool = False
        A : bool = True
        A : bool | None = None

    @dataclass
    class __SCREAMING_SNAKE_CASE :
        A : int | None = None
        A : float | None = field(default=A__ , metadata={'help': 'help message'} )
        A : str | None = None
        A : list[str] | None = list_field(default=[] )
        A : list[int] | None = list_field(default=[] )


class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        # Helper: assert two argparse parsers define equivalent actions.
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            lowercase : Tuple = {k: v for k, v in vars(SCREAMING_SNAKE_CASE__ ).items() if k != '''container'''}
            lowercase : Optional[int] = {k: v for k, v in vars(SCREAMING_SNAKE_CASE__ ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , SCREAMING_SNAKE_CASE__ ) and yy.get('''choices''' , SCREAMING_SNAKE_CASE__ ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](SCREAMING_SNAKE_CASE__ ) , yy['''type'''](SCREAMING_SNAKE_CASE__ ) )
                del xx["type"], yy["type"]
            self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # Required scalar fields map to required CLI arguments.
        lowercase : Tuple = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--bar''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--baz''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--flag''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , const=SCREAMING_SNAKE_CASE__ , nargs='''?''' )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        lowercase : int = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        # NOTE(review): annotating a parenthesized tuple target is a
        # SyntaxError — originally a plain `(example,) = ...` unpacking.
        ((lowercase) , ) : str = parser.parse_args_into_dataclasses(SCREAMING_SNAKE_CASE__ , look_for_args_file=SCREAMING_SNAKE_CASE__ )
        self.assertFalse(example.flag )

    def __lowerCamelCase ( self ):
        # Fields with defaults become optional CLI arguments.
        lowercase : str = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : int = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=SCREAMING_SNAKE_CASE__ , help='''help message''' )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # Boolean fields: --flag, --no_flag and explicit True/False parsing.
        lowercase : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , const=SCREAMING_SNAKE_CASE__ , nargs='''?''' )
        expected.add_argument('''--baz''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , const=SCREAMING_SNAKE_CASE__ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=SCREAMING_SNAKE_CASE__ , dest='''baz''' )
        expected.add_argument('''--opt''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ )
        lowercase : List[str] = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(SCREAMING_SNAKE_CASE__ )
        for dataclass_type in dataclass_types:
            lowercase : int = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
            self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            lowercase : Dict = parser.parse_args([] )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
            lowercase : Dict = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
            lowercase : Optional[int] = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
            lowercase : Any = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )
            lowercase : Tuple = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , opt=SCREAMING_SNAKE_CASE__ ) )

    def __lowerCamelCase ( self ):
        # Mixed-type enum field: choices accept both strings and ints.
        lowercase : int = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Optional[int] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        lowercase : Optional[Any] = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        lowercase : Tuple = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        lowercase : Tuple = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        lowercase : List[Any] = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        lowercase : List[Any] = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        lowercase : Union[str, Any] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def __lowerCamelCase ( self ):
        # `Literal[...]` annotations behave like enums for choices.
        @dataclass
        class __SCREAMING_SNAKE_CASE :
            A : Literal["titi", "toto", 42] = "toto"

        lowercase : str = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : str = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        lowercase : List[str] = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        lowercase : Dict = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        lowercase : Tuple = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )

    def __lowerCamelCase ( self ):
        # List fields map to nargs='+' arguments with their defaults.
        lowercase : Dict = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=SCREAMING_SNAKE_CASE__ )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        lowercase : str = parser.parse_args([] )
        self.assertEqual(
            SCREAMING_SNAKE_CASE__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        lowercase : List[Any] = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )

    def __lowerCamelCase ( self ):
        # Optional fields default to None and parse when provided.
        lowercase : str = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--bar''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='''help message''' )
        expected.add_argument('''--baz''' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=SCREAMING_SNAKE_CASE__ )
        lowercase : Tuple = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(SCREAMING_SNAKE_CASE__ )
        for dataclass_type in dataclass_types:
            lowercase : Any = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
            self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            lowercase : Union[str, Any] = parser.parse_args([] )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=SCREAMING_SNAKE_CASE__ , bar=SCREAMING_SNAKE_CASE__ , baz=SCREAMING_SNAKE_CASE__ , ces=[] , des=[] ) )
            lowercase : Dict = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(SCREAMING_SNAKE_CASE__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )

    def __lowerCamelCase ( self ):
        # Required fields (no defaults) are marked required=True.
        lowercase : Tuple = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Tuple = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--required_str''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=SCREAMING_SNAKE_CASE__ , )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # String-literal (forward-reference) annotations parse identically.
        lowercase : List[Any] = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Union[str, Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=SCREAMING_SNAKE_CASE__ , )
        expected.add_argument('''--opt''' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ )
        expected.add_argument('''--baz''' , default='''toto''' , type=SCREAMING_SNAKE_CASE__ , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=SCREAMING_SNAKE_CASE__ )
        self.argparsersEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # parse_dict round-trips a plain dict into the dataclass.
        lowercase : Any = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Tuple = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        lowercase : Any = parser.parse_dict(SCREAMING_SNAKE_CASE__ )[0]
        lowercase : List[str] = BasicExample(**SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # Unknown keys raise when allow_extra_keys is disabled.
        lowercase : List[Any] = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : List[Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(SCREAMING_SNAKE_CASE__ , parser.parse_dict , SCREAMING_SNAKE_CASE__ , allow_extra_keys=SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # Parse arguments from a JSON file on disk.
        lowercase : Dict = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Union[str, Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , '''temp_json''' )
            os.mkdir(SCREAMING_SNAKE_CASE__ )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            # NOTE(review): parse_yaml_file on a .json path — presumably
            # parse_json_file was intended here.
            lowercase : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
        lowercase : List[str] = BasicExample(**SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # Parse arguments from a YAML file on disk.
        lowercase : Tuple = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        lowercase : Union[str, Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , '''temp_yaml''' )
            os.mkdir(SCREAMING_SNAKE_CASE__ )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            lowercase : List[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
        lowercase : Optional[int] = BasicExample(**SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def __lowerCamelCase ( self ):
        # Smoke test: HfArgumentParser accepts TrainingArguments.
        lowercase : int = HfArgumentParser(SCREAMING_SNAKE_CASE__ )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
319
# Fast tests for the DeepFloyd IF inpainting pipeline (diffusers).
import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
    # NOTE(review): the bases `A__` are never defined (presumably
    # PipelineTesterMixin and IFPipelineTesterMixin), every class attribute is
    # bound to the same name `A`, methods all share the name `__lowerCamelCase`
    # (later definitions shadow earlier ones), and locals reference unbound
    # names (`image`, `mask_image`, `generator`, `inputs`) — the intended
    # names must be restored for this suite to run.

    # Pipeline under test and its parameter sets.
    A : List[str] = IFInpaintingPipeline
    A : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    A : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    A : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}

    def __lowerCamelCase ( self ):
        # Delegate to the IF tester mixin's tiny dummy components.
        return self._get_dummy_components()

    def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
        # Build deterministic pipeline inputs (prompt + image + mask).
        if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
            lowercase : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
        else:
            lowercase : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
        lowercase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
        lowercase : Tuple = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,
    )
    def __lowerCamelCase ( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def __lowerCamelCase ( self ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def __lowerCamelCase ( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def __lowerCamelCase ( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def __lowerCamelCase ( self ):
        self._test_save_load_local()

    def __lowerCamelCase ( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,
        )
319
1
import sys

# The 1000-digit number from Project Euler problem 8.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Return the product of the digits of the numeric string *s*."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the greatest product of 13 adjacent digits in *n*.

    The previous version referenced undefined names (``s``, ``product``,
    ``N``, ``solution``) and used a fragile greedy window skip; this
    version scans every 13-digit window, which is still linear in len(n).
    """
    largest_product = -sys.maxsize - 1
    for cur_index in range(len(n) - 12):
        largest_product = max(largest_product, str_eval(n[cur_index : cur_index + 13]))
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
689
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Maps submodule name -> public names it provides; consumed by _LazyModule.
# The previous version assigned both this dict and the model list to the same
# name, leaving `_import_structure` undefined at the bottom of the file.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    # Modeling code requires torch; register it only when torch is present.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand;
    # the previous version discarded the _LazyModule instead of installing it.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
689
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer UpperCamelCase_ : Optional[int] = logging.get_logger(__name__) UpperCamelCase_ : Optional[Any] = {"""vocab_file""": """vocab.txt"""} UpperCamelCase_ : Dict = { """vocab_file""": { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""", } } UpperCamelCase_ : Union[str, Any] = { """YituTech/conv-bert-base""": 512, """YituTech/conv-bert-medium-small""": 512, """YituTech/conv-bert-small""": 512, } UpperCamelCase_ : Optional[int] = { """YituTech/conv-bert-base""": {"""do_lower_case""": True}, """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True}, """YituTech/conv-bert-small""": {"""do_lower_case""": True}, } class __lowercase ( __snake_case ): _A = VOCAB_FILES_NAMES _A = PRETRAINED_VOCAB_FILES_MAP _A = PRETRAINED_INIT_CONFIGURATION _A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A = ConvBertTokenizer def __init__(self : Any , snake_case : Optional[Any]=None , snake_case : Optional[int]=None , snake_case : Tuple=True , snake_case : List[str]="[UNK]" , snake_case : Any="[SEP]" , snake_case : Optional[int]="[PAD]" , snake_case : List[Any]="[CLS]" , snake_case : Any="[MASK]" , snake_case : List[str]=True , snake_case : Any=None , **snake_case : List[str] , ) -> Optional[int]: super().__init__( snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , 
) _lowercase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , snake_case ) != do_lower_case or normalizer_state.get("strip_accents" , snake_case ) != strip_accents or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars ): _lowercase : Optional[Any] = getattr(snake_case , normalizer_state.pop("type" ) ) _lowercase : Any = do_lower_case _lowercase : List[Any] = strip_accents _lowercase : str = tokenize_chinese_chars _lowercase : Dict = normalizer_class(**snake_case ) _lowercase : Optional[int] = do_lower_case def _a(self : Tuple , snake_case : int , snake_case : Dict=None ) -> Optional[Any]: _lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a(self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]: _lowercase : Optional[Any] = [self.sep_token_id] _lowercase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a(self : Any , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]: _lowercase : Union[str, Any] = self._tokenizer.model.save(snake_case , name=snake_case ) return tuple(snake_case )
461
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if *ip_va_address* is a valid dotted-decimal IPv4 address.

    A valid address is exactly four dot-separated octets, each a
    non-negative decimal integer between 0 and 255 inclusive.

    Fixes over the previous version:
    * the function was defined under a name different from its call site;
    * non-numeric octets were silently filtered out *before* the length
      check, so e.g. "1.2.3.x.4" was accepted;
    * the upper bound was 254 instead of 255.
    """
    octets = ip_va_address.split(".")
    if len(octets) != 4:
        return False
    # str.isdigit() rejects signs, spaces and empty strings before int() runs.
    return all(octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
461
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowercase = ['''gpt2'''] lowercase = '''gpt2''' if is_tf_available(): class A_ ( tf.Module ): def __init__( self : str , __lowerCamelCase : List[Any] ) -> Optional[Any]: super().__init__() __magic_name__ = tokenizer __magic_name__ = AutoConfig.from_pretrained(__lowerCamelCase ) __magic_name__ = TFGPTaLMHeadModel.from_config(__lowerCamelCase ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) ) def _snake_case ( self : Tuple , __lowerCamelCase : Any ) -> List[Any]: __magic_name__ = self.tokenizer(__lowerCamelCase ) __magic_name__ = tokenized["input_ids"].to_tensor() __magic_name__ = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) __magic_name__ = self.model(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )["logits"] return outputs @require_tf @require_keras_nlp class A_ ( unittest.TestCase ): def _snake_case ( self : Any ) -> Dict: super().setUp() __magic_name__ = [GPTaTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] __magic_name__ = [TFGPTaTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __magic_name__ = [ "This is a straightforward English test sentence.", "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.", "Now we're going to add some Chinese: 一 二 三 一二三", "And some much more rare Chinese: 齉 堃 齉堃", "Je vais aussi écrire en français pour tester les 
accents", "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ", ] __magic_name__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def _snake_case ( self : int ) -> Dict: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: __magic_name__ = tokenizer([test_inputs] , return_tensors="tf" ) __magic_name__ = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors __magic_name__ = python_outputs[key].numpy() __magic_name__ = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__lowerCamelCase , tf.intaa ) == tf_outputs_values ) ) @slow def _snake_case ( self : List[Any] ) -> int: for tf_tokenizer in self.tf_tokenizers: __magic_name__ = tf.function(__lowerCamelCase ) for test_inputs in self.test_sentences: __magic_name__ = tf.constant(__lowerCamelCase ) __magic_name__ = compiled_tokenizer(__lowerCamelCase ) __magic_name__ = tf_tokenizer(__lowerCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _snake_case ( self : List[Any] ) -> Dict: for tf_tokenizer in self.tf_tokenizers: __magic_name__ = ModelToSave(tokenizer=__lowerCamelCase ) __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] ) __magic_name__ = model.serving(__lowerCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __magic_name__ = Path(__lowerCamelCase ) / "saved.model" tf.saved_model.save(__lowerCamelCase , __lowerCamelCase , signatures={"serving_default": model.serving} ) __magic_name__ = tf.saved_model.load(__lowerCamelCase ) __magic_name__ = loaded_model.signatures["serving_default"](__lowerCamelCase )["output_0"] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test 
self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: for tf_tokenizer in self.tf_tokenizers: __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] ) __magic_name__ = tf_tokenizer(__lowerCamelCase ) # Build model with some sample inputs __magic_name__ = tf_tokenizer.get_config() __magic_name__ = TFGPTaTokenizer.from_config(__lowerCamelCase ) __magic_name__ = model_from_config(__lowerCamelCase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def _snake_case ( self : Any ) -> Tuple: for tf_tokenizer in self.tf_tokenizers: # for the test to run __magic_name__ = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] ) __magic_name__ = tf_tokenizer(__lowerCamelCase , max_length=__lowerCamelCase ) __magic_name__ = out["input_ids"].numpy().shape[1] assert out_length == max_length
468
"""simple docstring""" from __future__ import annotations lowercase = [] def _lowerCAmelCase ( __lowerCamelCase:list[list[int]] , __lowerCamelCase:int , __lowerCamelCase:int ): '''simple docstring''' for i in range(len(__lowerCamelCase ) ): if board[row][i] == 1: return False for i in range(len(__lowerCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(__lowerCamelCase , -1 , -1 ) , range(__lowerCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(__lowerCamelCase , -1 , -1 ) , range(__lowerCamelCase , len(__lowerCamelCase ) ) ): if board[i][j] == 1: return False return True def _lowerCAmelCase ( __lowerCamelCase:list[list[int]] , __lowerCamelCase:int ): '''simple docstring''' if row >= len(__lowerCamelCase ): solution.append(__lowerCamelCase ) printboard(__lowerCamelCase ) print() return True for i in range(len(__lowerCamelCase ) ): if is_safe(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): __magic_name__ = 1 solve(__lowerCamelCase , row + 1 ) __magic_name__ = 0 return False def _lowerCAmelCase ( __lowerCamelCase:list[list[int]] ): '''simple docstring''' for i in range(len(__lowerCamelCase ) ): for j in range(len(__lowerCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) lowercase = 8 lowercase = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
468
1
from __future__ import annotations from random import random from typing import Generic, TypeVar SCREAMING_SNAKE_CASE = TypeVar('KT') SCREAMING_SNAKE_CASE = TypeVar('VT') class __UpperCAmelCase ( Generic[KT, VT] ): """simple docstring""" def __init__( self , __A = "root" , __A = None ): __a = key __a = value __a = [] def __repr__( self ): return f'''Node({self.key}: {self.value})''' @property def snake_case_ ( self ): return len(self.forward ) class __UpperCAmelCase ( Generic[KT, VT] ): """simple docstring""" def __init__( self , __A = 0.5 , __A = 16 ): __a = Node[KT, VT]() __a = 0 __a = p __a = max_level def __str__( self ): __a = list(self ) if len(__A ) == 0: return f'''SkipList(level={self.level})''' __a = max((len(str(__A ) ) for item in items) , default=4 ) __a = max(__A , 4 ) + 4 __a = self.head __a = [] __a = node.forward.copy() lines.append(f'''[{node.key}]'''.ljust(__A , """-""" ) + """* """ * len(__A ) ) lines.append(""" """ * label_size + """| """ * len(__A ) ) while len(node.forward ) != 0: __a = node.forward[0] lines.append( f'''[{node.key}]'''.ljust(__A , """-""" ) + """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) ) lines.append(""" """ * label_size + """| """ * len(__A ) ) __a = node.forward lines.append("""None""".ljust(__A ) + """* """ * len(__A ) ) return f'''SkipList(level={self.level})\n''' + "\n".join(__A ) def __iter__( self ): __a = self.head while len(node.forward ) != 0: yield node.forward[0].key __a = node.forward[0] def snake_case_ ( self ): __a = 1 while random() < self.p and level < self.max_level: level += 1 return level def snake_case_ ( self , __A ): __a = [] __a = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. 
while i < node.level and node.forward[i].key < key: __a = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__A ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def snake_case_ ( self , __A ): __a , __a = self._locate_node(__A ) if node is not None: for i, update_node in enumerate(__A ): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __a = node.forward[i] else: __a = update_node.forward[:i] def snake_case_ ( self , __A , __A ): __a , __a = self._locate_node(__A ) if node is not None: __a = value else: __a = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __A ): update_vector.append(self.head ) __a = level __a = Node(__A , __A ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. 
if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(__A ) else: __a = new_node def snake_case_ ( self , __A ): __a , __a = self._locate_node(__A ) if node is not None: return node.value return None def a (): __a = SkipList() skip_list.insert("""Key1""" , 3 ) skip_list.insert("""Key2""" , 12 ) skip_list.insert("""Key3""" , 41 ) skip_list.insert("""Key4""" , -19 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value assert len(lowerCAmelCase__ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def a (): __a = SkipList() skip_list.insert("""Key1""" , 10 ) skip_list.insert("""Key1""" , 12 ) skip_list.insert("""Key5""" , 7 ) skip_list.insert("""Key7""" , 10 ) skip_list.insert("""Key10""" , 5 ) skip_list.insert("""Key7""" , 7 ) skip_list.insert("""Key5""" , 5 ) skip_list.insert("""Key10""" , 10 ) __a = skip_list.head __a = {} while node.level != 0: __a = node.forward[0] __a = node.value if len(lowerCAmelCase__ ) != 4: print() assert len(lowerCAmelCase__ ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def a (): __a = SkipList() assert skip_list.find("""Some key""" ) is None def a (): __a = SkipList() skip_list.insert("""Key2""" , 20 ) assert skip_list.find("""Key2""" ) == 20 skip_list.insert("""Some Key""" , 10 ) skip_list.insert("""Key2""" , 8 ) skip_list.insert("""V""" , 13 ) assert skip_list.find("""Y""" ) is None assert skip_list.find("""Key2""" ) == 8 assert skip_list.find("""Some Key""" ) == 10 assert skip_list.find("""V""" ) == 13 def a (): __a = SkipList() skip_list.delete("""Some key""" ) assert len(skip_list.head.forward ) == 0 def a (): __a = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" 
, 15 ) skip_list.delete("""V""" ) skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""Key2""" ) is None def a (): __a = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) == 14 assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""X""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key1""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) is None def a (): __a = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 142 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""X""" ) def traverse_keys(lowerCAmelCase__ ): yield node.key for forward_node in node.forward: yield from traverse_keys(lowerCAmelCase__ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def a (): def is_sorted(lowerCAmelCase__ ): return all(next_item >= item for item, next_item in zip(lowerCAmelCase__ , lst[1:] ) ) __a = SkipList() for i in range(10 ): skip_list.insert(lowerCAmelCase__ , lowerCAmelCase__ ) assert is_sorted(list(lowerCAmelCase__ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(lowerCAmelCase__ ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(lowerCAmelCase__ ) ) def a (): for _ in range(100 ): # Repeat test 100 times due to the 
probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def a (): __a = SkipList() skip_list.insert(2 , """2""" ) skip_list.insert(4 , """4""" ) skip_list.insert(6 , """4""" ) skip_list.insert(4 , """5""" ) skip_list.insert(8 , """4""" ) skip_list.insert(9 , """4""" ) skip_list.delete(4 ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
99
'''simple docstring''' import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging SCREAMING_SNAKE_CASE_ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"] SCREAMING_SNAKE_CASE_ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = " Hello world! cécé herlolip" SCREAMING_SNAKE_CASE_ = [ ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"), ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"), ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"), ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"), ] def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): __a : Dict = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __a : Dict = dct.pop(SCREAMING_SNAKE_CASE__ ) __a : Dict = val def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): __a : Dict = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' ) __a : Dict = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval() hub_interface.model.load_state_dict(sd['model'] ) return hub_interface def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ): __a , __a : Dict = emb.weight.shape __a : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
bias=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = emb.weight.data return lin_layer @torch.no_grad() def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): if not os.path.exists(SCREAMING_SNAKE_CASE__ ): __a : Tuple = torch.hub.load('pytorch/fairseq' , SCREAMING_SNAKE_CASE__ ).eval() else: __a : Optional[int] = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: __a : List[str] = checkpoint_path.replace('.' , '-' ) __a : Optional[Any] = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) __a : List[str] = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).unsqueeze(0 ) if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all(): raise ValueError( f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": __a : List[Any] = bart.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) __a : str = state_dict['model.decoder.embed_tokens.weight'] for src, dest in mnli_rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Dict = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) __a : Any = bart.predict('mnli' , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )[0] # logits else: # no classification heads to worry about __a : Dict = bart.model.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = state_dict['decoder.embed_tokens.weight'] __a : List[Any] = bart.extract_features(SCREAMING_SNAKE_CASE__ ) if hf_checkpoint_name == "facebook/bart-large": __a : Dict = BartModel(SCREAMING_SNAKE_CASE__ ).eval() 
model.load_state_dict(SCREAMING_SNAKE_CASE__ ) __a : str = model(SCREAMING_SNAKE_CASE__ ).model[0] else: __a : Optional[Any] = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt model.model.load_state_dict(SCREAMING_SNAKE_CASE__ ) if hasattr(SCREAMING_SNAKE_CASE__ , 'lm_head' ): __a : Optional[int] = make_linear_from_emb(model.model.shared ) __a : List[Any] = model.model(SCREAMING_SNAKE_CASE__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum" ) SCREAMING_SNAKE_CASE_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
597
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): '''simple docstring''' _a = 'naver-clova-ix/donut-base-finetuned-docvqa' _a = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' 
) _a = 'document_qa' _a = AutoProcessor _a = VisionEncoderDecoderModel _a = ['image', 'text'] _a = ['text'] def __init__( self : Union[str, Any], *lowerCamelCase : int, **lowerCamelCase : List[str] )-> Optional[int]: if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*lowerCamelCase, **lowerCamelCase ) def snake_case ( self : Dict, lowerCamelCase : "Image", lowerCamelCase : str )-> Optional[Any]: lowerCamelCase__ : List[Any] ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' lowerCamelCase__ : Dict =task_prompt.replace('''{user_input}''', lowerCamelCase ) lowerCamelCase__ : List[Any] =self.pre_processor.tokenizer( lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors='''pt''' ).input_ids lowerCamelCase__ : Union[str, Any] =self.pre_processor(lowerCamelCase, return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def snake_case ( self : List[str], lowerCamelCase : Tuple )-> Any: return self.model.generate( inputs['''pixel_values'''].to(self.device ), decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=lowerCamelCase, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=lowerCamelCase, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=lowerCamelCase, ).sequences def snake_case ( self : Optional[int], lowerCamelCase : Tuple )-> str: lowerCamelCase__ : Dict =self.pre_processor.batch_decode(lowerCamelCase )[0] lowerCamelCase__ : Optional[int] =sequence.replace(self.pre_processor.tokenizer.eos_token, '''''' ) lowerCamelCase__ : Dict =sequence.replace(self.pre_processor.tokenizer.pad_token, '''''' ) lowerCamelCase__ : Optional[int] =re.sub(r'''<.*?>''', '''''', lowerCamelCase, count=1 ).strip() 
# remove first task start token lowerCamelCase__ : int =self.pre_processor.tokenajson(lowerCamelCase ) return sequence["answer"]
625
"""simple docstring""" def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] ): """simple docstring""" # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[int] , __lowerCamelCase : int ): """simple docstring""" # Base Case if curr_ind == len(__lowerCamelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__lowerCamelCase ) ): if valid_connection(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): # Insert current vertex into path as next transition lowerCamelCase__ : Tuple =next_ver # Validate created path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , curr_ind + 1 ): return True # Backtrack lowerCamelCase__ : int =-1 return False def snake_case__ ( __lowerCamelCase : list[list[int]] , __lowerCamelCase : int = 0 ): """simple docstring""" lowerCamelCase__ : Tuple =[-1] * (len(__lowerCamelCase ) + 1) # initialize start and end of path with starting index lowerCamelCase__ : Union[str, Any] =start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__lowerCamelCase , __lowerCamelCase , 1 ) else []
625
1
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowercase__( __SCREAMING_SNAKE_CASE : NDArray[floataa] , __SCREAMING_SNAKE_CASE : NDArray[floataa] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , ): lowercase_ , lowercase_ : Optional[int] = coefficient_matrix.shape lowercase_ , lowercase_ : Union[str, Any] = constant_matrix.shape if rowsa != colsa: lowercase_ : Union[str, Any] = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(__SCREAMING_SNAKE_CASE ) if colsa != 1: lowercase_ : int = F'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(__SCREAMING_SNAKE_CASE ) if rowsa != rowsa: lowercase_ : str = ( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' F'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) != rowsa: lowercase_ : List[str] = ( 'Number of initial values must be equal to number of rows in coefficient ' F'''matrix but received {len(__SCREAMING_SNAKE_CASE )} and {rowsa}''' ) raise ValueError(__SCREAMING_SNAKE_CASE ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) lowercase_ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) lowercase_ , lowercase_ : Optional[int] = table.shape strictly_diagonally_dominant(__SCREAMING_SNAKE_CASE ) # Iterates the whole matrix for given number of times for _ in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = [] for row in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Optional[Any] = 0 for col in range(__SCREAMING_SNAKE_CASE ): if col == row: lowercase_ : Dict = table[row][col] elif col == cols - 1: lowercase_ : Tuple = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] lowercase_ : str = (temp + val) / denom new_val.append(__SCREAMING_SNAKE_CASE ) lowercase_ : 
Optional[int] = new_val return [float(__SCREAMING_SNAKE_CASE ) for i in new_val] def lowercase__( __SCREAMING_SNAKE_CASE : NDArray[floataa] ): lowercase_ , lowercase_ : List[Any] = table.shape lowercase_ : Optional[int] = True for i in range(0 , __SCREAMING_SNAKE_CASE ): lowercase_ : int = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
425
"""simple docstring""" __SCREAMING_SNAKE_CASE =9.8_06_65 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float = g ): if fluid_density <= 0: raise ValueError('Impossible fluid density' ) if volume < 0: raise ValueError('Impossible Object volume' ) if gravity <= 0: raise ValueError('Impossible Gravity' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
425
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase : str = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> public names, consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see direct imports; at runtime _LazyModule defers them.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
25
1
import warnings from .generation import TFGenerationMixin class lowerCAmelCase_ ( a__ ): # warning at import time warnings.warn( "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will " "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , a__ , )
40
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and tokenizer into one processor.

    ``__call__`` routes inputs to the right sub-processor depending on whether
    the source/target is audio or text; ``pad`` does the same for batching.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process model inputs and (optionally) targets.

        Exactly one of ``audio``/``text`` may be given as the input, and exactly
        one of ``audio_target``/``text_target`` as the target. Target features
        are attached to the inputs as ``labels`` (plus ``decoder_attention_mask``).
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(
                audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs
            )
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Pad a batch: ``input_values`` via the feature extractor, ``input_ids``
        via the tokenizer, and ``labels`` via whichever matches their structure."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                # Text labels: pad token ids with the tokenizer.
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Spectrogram labels: temporarily swap the extractor's feature size
                # to the mel-bin count so padding matches the target shape.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
343
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""], """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""MaskFormerFeatureExtractor"""] SCREAMING_SNAKE_CASE__ = ["""MaskFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """MaskFormerForInstanceSegmentation""", """MaskFormerModel""", """MaskFormerPreTrainedModel""", ] SCREAMING_SNAKE_CASE__ = [ """MaskFormerSwinBackbone""", """MaskFormerSwinModel""", """MaskFormerSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
717
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Tuple = { """configuration_table_transformer""": [ """TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TableTransformerConfig""", """TableTransformerOnnxConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = [ """TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TableTransformerForObjectDetection""", """TableTransformerModel""", """TableTransformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TableTransformerConfig, TableTransformerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
180
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a :List[Any] = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Dict = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's state dict under ``output_dir``.

    Layout depends on ``fsdp_plugin.state_dict_type``: one file on rank 0
    (FULL), one file per rank (LOCAL), or a distributed-checkpoint directory
    (SHARDED).
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:  # full state dict only exists on rank 0
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load a model state dict saved by :func:`save_fsdp_model`."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                # Non-zero ranks skip loading; sync_module_states broadcasts from rank 0.
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the optimizer state for an FSDP model (full file or sharded dir)."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load optimizer state saved by :func:`save_fsdp_optimizer` and re-flatten
    it for the wrapped model before loading into ``optimizer``."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
593
0
"""simple docstring""" class SCREAMING_SNAKE_CASE_ : '''simple docstring''' def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None) -> Optional[int]: '''simple docstring''' snake_case__ : int = data snake_case__ : Any = previous snake_case__ : Optional[Any] = next_node def __str__( self) -> str: '''simple docstring''' return f"""{self.data}""" def UpperCAmelCase ( self) -> int: '''simple docstring''' return self.data def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' return self.next def UpperCAmelCase ( self) -> str: '''simple docstring''' return self.previous class SCREAMING_SNAKE_CASE_ : '''simple docstring''' def __init__( self , lowerCamelCase__) -> Union[str, Any]: '''simple docstring''' snake_case__ : List[Any] = head def __iter__( self) -> Any: '''simple docstring''' return self def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' if not self.current: raise StopIteration else: snake_case__ : Any = self.current.get_data() snake_case__ : int = self.current.get_next() return value class SCREAMING_SNAKE_CASE_ : '''simple docstring''' def __init__( self) -> str: '''simple docstring''' snake_case__ : str = None # First node in list snake_case__ : List[str] = None # Last node in list def __str__( self) -> Any: '''simple docstring''' snake_case__ : List[Any] = self.head snake_case__ : Any = [] while current is not None: nodes.append(current.get_data()) snake_case__ : List[str] = current.get_next() return " ".join(str(lowerCamelCase__) for node in nodes) def __contains__( self , lowerCamelCase__) -> Union[str, Any]: '''simple docstring''' snake_case__ : Optional[Any] = self.head while current: if current.get_data() == value: return True snake_case__ : Tuple = current.get_next() return False def __iter__( self) -> Union[str, Any]: '''simple docstring''' return LinkedListIterator(self.head) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' if self.head: return self.head.get_data() return None def 
UpperCAmelCase ( self) -> Tuple: '''simple docstring''' if self.tail: return self.tail.get_data() return None def UpperCAmelCase ( self , lowerCamelCase__) -> None: '''simple docstring''' if self.head is None: snake_case__ : Any = node snake_case__ : List[str] = node else: self.insert_before_node(self.head , lowerCamelCase__) def UpperCAmelCase ( self , lowerCamelCase__) -> None: '''simple docstring''' if self.head is None: self.set_head(lowerCamelCase__) else: self.insert_after_node(self.tail , lowerCamelCase__) def UpperCAmelCase ( self , lowerCamelCase__) -> None: '''simple docstring''' snake_case__ : Optional[int] = Node(lowerCamelCase__) if self.head is None: self.set_head(lowerCamelCase__) else: self.set_tail(lowerCamelCase__) def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> None: '''simple docstring''' snake_case__ : Union[str, Any] = node snake_case__ : Any = node.previous if node.get_previous() is None: snake_case__ : Dict = node_to_insert else: snake_case__ : Dict = node_to_insert snake_case__ : List[Any] = node_to_insert def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> None: '''simple docstring''' snake_case__ : List[str] = node snake_case__ : Union[str, Any] = node.next if node.get_next() is None: snake_case__ : Tuple = node_to_insert else: snake_case__ : List[Any] = node_to_insert snake_case__ : str = node_to_insert def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> None: '''simple docstring''' snake_case__ : int = 1 snake_case__ : int = Node(lowerCamelCase__) snake_case__ : Tuple = self.head while node: if current_position == position: self.insert_before_node(lowerCamelCase__ , lowerCamelCase__) return current_position += 1 snake_case__ : Dict = node.next self.insert_after_node(self.tail , lowerCamelCase__) def UpperCAmelCase ( self , lowerCamelCase__) -> Node: '''simple docstring''' snake_case__ : Union[str, Any] = self.head while node: if node.get_data() == item: return node snake_case__ : 
Union[str, Any] = node.get_next() raise Exception("Node not found") def UpperCAmelCase ( self , lowerCamelCase__) -> List[Any]: '''simple docstring''' if (node := self.get_node(lowerCamelCase__)) is not None: if node == self.head: snake_case__ : Union[str, Any] = self.head.get_next() if node == self.tail: snake_case__ : Tuple = self.tail.get_previous() self.remove_node_pointers(lowerCamelCase__) @staticmethod def UpperCAmelCase ( lowerCamelCase__) -> None: '''simple docstring''' if node.get_next(): snake_case__ : List[str] = node.previous if node.get_previous(): snake_case__ : Optional[Any] = node.next snake_case__ : List[Any] = None snake_case__ : Dict = None def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' return self.head is None def A__ ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
150
"""simple docstring""" def A__ ( _UpperCAmelCase : int = 1_00_00_00 ) -> int: '''simple docstring''' snake_case__ : List[Any] = limit + 1 snake_case__ : Union[str, Any] = [0] * limit for first_term in range(1 , _UpperCAmelCase ): for n in range(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): snake_case__ : List[Any] = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a snake_case__ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f"{solution() = }")
150
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _SCREAMING_SNAKE_CASE : Any = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Optional[Any] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
436
'''simple docstring''' from graphs.minimum_spanning_tree_kruskal import kruskal def _UpperCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = 9 __magic_name__ : Tuple = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] __magic_name__ : List[str] = kruskal(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ : List[Any] = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(UpperCamelCase__ ) == sorted(UpperCamelCase__ )
436
1
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined elements of two (unsorted) arrays.

    :param nums1: first array of numbers (may be empty).
    :param nums2: second array of numbers (may be empty).
    :return: middle element of the merged sorted values, or the mean of the
        two middle elements when the total count is even.
    :raises IndexError: if both arrays are empty.
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        # Odd total: the single middle element is the median.
        return all_numbers[div]
    # Even total: average the two middle elements.
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
130
import math  # retained from the original module; the integer rewrite below no longer needs it


def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation.

    :param num: non-negative integer to convert.
    :return: octal string with a ``0o`` prefix, e.g. ``decimal_to_octal(65) == "0o101"``.
    """
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        # Place each octal digit positionally in a base-10 accumulator.
        # Exact integer arithmetic replaces the original float math
        # (math.pow) which loses precision once counter exceeds ~15 digits.
        octal += remainder * 10**counter
        counter += 1
        num //= 8  # basically /= 8 without remainder if any
    return f"0o{octal}"


def main() -> None:
    """Print a few sample conversions."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
130
1
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into Cartesian components [Fx, Fy].

    :param magnitude: force magnitude.
    :param angle: direction of the force; degrees unless ``radian_mode``.
    :param radian_mode: interpret ``angle`` in radians when True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check rotational equilibrium of a planar force system.

    The net moment about the origin is the sum of the 2-D cross products of
    each application point with its force; equilibrium means the total
    moment vanishes (|sum| < eps).

    :param forces: (n, 2) array of force vectors.
    :param location: (n, 2) array of application points.
    :param eps: tolerance on the net moment.
    """
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
429
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    """Configuration for a CvT (Convolutional vision Transformer) model.

    Each per-stage hyper-parameter is a 3-element list (one entry per stage);
    the defaults reproduce the ``microsoft/cvt-13`` architecture. Extra
    keyword arguments are forwarded to :class:`PretrainedConfig`.
    NOTE: the list defaults are shared objects and must be treated as
    read-only.
    """

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
429
1
"""Image processor for LayoutLMv3-style models: resize/rescale/normalize
document images and optionally run Tesseract OCR to extract words together
with 0-1000-normalized bounding boxes."""
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """Scale an absolute pixel box (x0, y0, x1, y1) into the 0-1000 range."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on one image; return (words, normalized boxes)."""
    # Tesseract operates on PIL images.
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """Preprocess document images for LayoutLMv3: optional OCR plus the usual
    resize / rescale / normalize pipeline producing ``pixel_values``."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        # Note the parameter is named rescale_value but stored as rescale_factor.
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # Calls the module-level `resize` from image_transforms (shadowed name).
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Standardize an image with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess a batch of images; every flag defaults to the value set
        at construction time. Returns a BatchFeature with ``pixel_values`` and,
        when OCR is enabled, ``words`` and ``boxes``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
425
"""PyTorch DistilBERT model tests: config checks, each task head, torchscript
device portability and a slow integration check against pretrained weights."""
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester:
    """Builds tiny random configs/inputs and exercises every DistilBert head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each example along a new "choice" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        # Compare a small slice of hidden states against reference values.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
425
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) score-SDE scheduler.

    Integrates the reverse-time VP SDE one Euler-Maruyama step at a time from
    a model-predicted score. Hyper-parameters (num_train_timesteps, beta_min,
    beta_max, sampling_eps) are captured on ``self.config`` by
    ``@register_to_config``.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # Populated lazily; `timesteps` is required before stepping.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the (continuous) time grid from 1 down to ``sampling_eps``."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Advance the sample ``x`` one reverse-SDE step at time ``t``.

        :param score: model output (score estimate) at (x, t).
        :param x: current sample.
        :param t: current (batched) continuous timestep.
        :param generator: optional RNG for the injected noise.
        :return: tuple ``(x, x_mean)`` — the noisy next sample and its mean.
        :raises ValueError: if ``set_timesteps`` has not been called.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: rescale by the marginal std of the VP SDE.
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        # Broadcast std over the trailing sample dimensions.
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute the (negative) step size of the uniform time grid.
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise (Euler-Maruyama diffusion term).
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
39
"""Tests for the standalone TrOCR decoder (TrOCRDecoder / TrOCRForCausalLM)."""
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny random configs/inputs for the decoder-only TrOCR tests."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Cached decoding must match the equivalent full forward pass."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
0
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( UpperCAmelCase__ , unittest.TestCase ): lowercase_ : int = RobertaTokenizer lowercase_ : int = RobertaTokenizerFast lowercase_ : int = True lowercase_ : Dict = {'''cls_token''': '''<s>'''} def lowercase__ ( self : Union[str, Any] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __snake_case = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __snake_case = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) __snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __snake_case = {'unk_token': '<unk>'} __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(__lowerCAmelCase ) ) def lowercase__ ( self : Tuple , **__lowerCAmelCase : List[str] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def lowercase__ ( self : Dict , **__lowerCAmelCase : Tuple ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def lowercase__ ( self : Optional[Any] , __lowerCAmelCase : int ): __snake_case = 'lower newer' __snake_case = 'lower newer' return input_text, 
output_text def lowercase__ ( self : Union[str, Any] ): __snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __snake_case = 'lower newer' __snake_case = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] __snake_case = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) __snake_case = tokens + [tokenizer.unk_token] __snake_case = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase ) def lowercase__ ( self : Tuple ): __snake_case = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=__lowerCAmelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , ) @slow def lowercase__ ( self : int ): __snake_case = self.tokenizer_class.from_pretrained('roberta-base' ) __snake_case = tokenizer.encode('sequence builders' , add_special_tokens=__lowerCAmelCase ) __snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=__lowerCAmelCase ) __snake_case = tokenizer.encode( 'sequence builders' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __snake_case = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) __snake_case = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowercase__ ( self : int ): __snake_case = self.get_tokenizer() __snake_case = 'Encode this sequence.' 
__snake_case = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments __snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) __snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase ) __snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) __snake_case = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) __snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) # Testing spaces after special tokens __snake_case = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space __snake_case = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) __snake_case = 'Encode <mask> sequence' __snake_case = 'Encode <mask>sequence' __snake_case = tokenizer.encode(__lowerCAmelCase ) __snake_case = encoded.index(__lowerCAmelCase ) __snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) __snake_case = tokenizer.encode(__lowerCAmelCase ) __snake_case = encoded.index(__lowerCAmelCase ) __snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase ) def lowercase__ ( self : List[str] ): pass def lowercase__ ( self : Dict ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): __snake_case = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __snake_case = 
self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) __snake_case = 'A, <mask> AllenNLP sentence.' __snake_case = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) __snake_case = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) __snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) __snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( __lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( __lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def lowercase__ ( self : Optional[int] ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __snake_case = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , 
__lowerCAmelCase ) self.assertEqual(post_processor_state['add_prefix_space'] , __lowerCAmelCase ) self.assertEqual(post_processor_state['trim_offsets'] , __lowerCAmelCase ) def lowercase__ ( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): __snake_case = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` __snake_case = F'{text_of_1_token} {text_of_1_token}' __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) 
self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __snake_case = F' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) 
) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , ) __snake_case = self.rust_tokenizer_class.from_pretrained( __lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase ) __snake_case = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
427
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""") # TF training parameters _lowercase = False _lowercase = False def lowerCamelCase__ ( a ): return TrainCommand(a ) class a_ ( UpperCAmelCase__ ): @staticmethod def lowercase__ ( __lowerCAmelCase : ArgumentParser ): __snake_case = parser.add_parser('train' , help='CLI tool to train a model on a task.' ) train_parser.add_argument( '--train_data' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , ) train_parser.add_argument( '--column_label' , type=__lowerCAmelCase , default=0 , help='Column of the dataset csv file with example labels.' ) train_parser.add_argument( '--column_text' , type=__lowerCAmelCase , default=1 , help='Column of the dataset csv file with example texts.' ) train_parser.add_argument( '--column_id' , type=__lowerCAmelCase , default=2 , help='Column of the dataset csv file with example ids.' ) train_parser.add_argument( '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' ) train_parser.add_argument('--validation_data' , type=__lowerCAmelCase , default='' , help='path to validation dataset.' ) train_parser.add_argument( '--validation_split' , type=__lowerCAmelCase , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , ) train_parser.add_argument('--output' , type=__lowerCAmelCase , default='./' , help='path to saved the trained model.' 
) train_parser.add_argument( '--task' , type=__lowerCAmelCase , default='text_classification' , help='Task to train the model on.' ) train_parser.add_argument( '--model' , type=__lowerCAmelCase , default='bert-base-uncased' , help='Model\'s name or path to stored model.' ) train_parser.add_argument('--train_batch_size' , type=__lowerCAmelCase , default=3_2 , help='Batch size for training.' ) train_parser.add_argument('--valid_batch_size' , type=__lowerCAmelCase , default=6_4 , help='Batch size for validation.' ) train_parser.add_argument('--learning_rate' , type=__lowerCAmelCase , default=3E-5 , help='Learning rate.' ) train_parser.add_argument('--adam_epsilon' , type=__lowerCAmelCase , default=1E-08 , help='Epsilon for Adam optimizer.' ) train_parser.set_defaults(func=__lowerCAmelCase ) def __init__( self : List[Any] , __lowerCAmelCase : Namespace ): __snake_case = logging.get_logger('transformers-cli/training' ) __snake_case = 'tf' if is_tf_available() else 'torch' os.makedirs(args.output , exist_ok=__lowerCAmelCase ) __snake_case = args.output __snake_case = args.column_label __snake_case = args.column_text __snake_case = args.column_id self.logger.info(F'Loading {args.task} pipeline for {args.model}' ) if args.task == "text_classification": __snake_case = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F'Loading dataset from {args.train_data}' ) __snake_case = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) __snake_case = None if args.validation_data: self.logger.info(F'Loading validation dataset from {args.validation_data}' ) __snake_case = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , 
skip_first_row=args.skip_first_row , ) __snake_case = args.validation_split __snake_case = args.train_batch_size __snake_case = args.valid_batch_size __snake_case = args.learning_rate __snake_case = args.adam_epsilon def lowercase__ ( self : Dict ): if self.framework == "tf": return self.run_tf() return self.run_torch() def lowercase__ ( self : Optional[int] ): raise NotImplementedError def lowercase__ ( self : Dict ): self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
427
1
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __A : Union[str, Any] = datasets.utils.logging.get_logger(__name__) __A : Union[str, Any] = ['''names''', '''prefix'''] __A : List[str] = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __A : Any = ['''encoding_errors''', '''on_bad_lines'''] __A : Optional[Any] = ['''date_format'''] @dataclass class __A ( datasets.BuilderConfig ): lowerCAmelCase_ : str = "," lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : Optional[Union[int, List[int], str]] = "infer" lowerCAmelCase_ : Optional[List[str]] = None lowerCAmelCase_ : Optional[List[str]] = None lowerCAmelCase_ : Optional[Union[int, str, List[int], List[str]]] = None lowerCAmelCase_ : Optional[Union[List[int], List[str]]] = None lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : bool = True lowerCAmelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None lowerCAmelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None lowerCAmelCase_ : Optional[list] = None lowerCAmelCase_ : Optional[list] = None lowerCAmelCase_ : bool = False lowerCAmelCase_ : Optional[Union[int, List[int]]] = None lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : Optional[Union[str, List[str]]] = None lowerCAmelCase_ : bool = True lowerCAmelCase_ : bool = True lowerCAmelCase_ : bool = False lowerCAmelCase_ : bool = True lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : str = "." 
lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : str = '"' lowerCAmelCase_ : int = 0 lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : bool = True lowerCAmelCase_ : bool = True lowerCAmelCase_ : int = 0 lowerCAmelCase_ : bool = True lowerCAmelCase_ : bool = False lowerCAmelCase_ : Optional[str] = None lowerCAmelCase_ : int = 1_0000 lowerCAmelCase_ : Optional[datasets.Features] = None lowerCAmelCase_ : Optional[str] = "strict" lowerCAmelCase_ : Literal["error", "warn", "skip"] = "error" lowerCAmelCase_ : Optional[str] = None def lowercase__ ( self : List[str] ): if self.delimiter is not None: lowerCAmelCase : Optional[Any] = self.delimiter if self.column_names is not None: lowerCAmelCase : List[str] = self.column_names @property def lowercase__ ( self : Union[str, Any] ): lowerCAmelCase : int = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 
'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class __A ( datasets.ArrowBasedBuilder ): lowerCAmelCase_ : int = CsvConfig def lowercase__ ( self : Any ): return datasets.DatasetInfo(features=self.config.features ) def lowercase__ ( self : Dict , UpperCAmelCase_ : Dict ): if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" ) lowerCAmelCase : List[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCAmelCase , (str, list, tuple) ): lowerCAmelCase : List[str] = data_files if isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowerCAmelCase : Dict = [files] lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] lowerCAmelCase : Any = [] for split_name, files in data_files.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowerCAmelCase : Union[str, Any] = [files] lowerCAmelCase : Union[str, Any] = 
[dl_manager.iter_files(_lowerCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) ) return splits def lowercase__ ( self : str , UpperCAmelCase_ : Dict ): if self.config.features is not None: lowerCAmelCase : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ): # cheaper cast lowerCAmelCase : Tuple = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example lowerCAmelCase : str = table_cast(_lowerCAmelCase , _lowerCAmelCase ) return pa_table def lowercase__ ( self : str , UpperCAmelCase_ : Union[str, Any] ): lowerCAmelCase : Optional[Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str lowerCAmelCase : Any = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ): lowerCAmelCase : Dict = pd.read_csv(_lowerCAmelCase , iterator=_lowerCAmelCase , dtype=_lowerCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(_lowerCAmelCase ): lowerCAmelCase : Tuple = pa.Table.from_pandas(_lowerCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase ) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}" ) raise
343
import sys UpperCamelCase = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: _lowercase : List[Any] = 1 for digit in s: product *= int(SCREAMING_SNAKE_CASE ) return product def __magic_name__ ( SCREAMING_SNAKE_CASE = N ) -> int: _lowercase : Dict = -sys.maxsize - 1 _lowercase : Tuple = n[:13] _lowercase : List[Any] = 13 while cur_index < len(SCREAMING_SNAKE_CASE ) - 13: if int(n[cur_index] ) >= int(substr[0] ): _lowercase : List[str] = substr[1:] + n[cur_index] cur_index += 1 else: _lowercase : str = max(SCREAMING_SNAKE_CASE , str_eval(SCREAMING_SNAKE_CASE ) ) _lowercase : Dict = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
66
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase_ = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
131
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int] , _A : Optional[int] , _A : Tuple=7 , _A : Optional[int]=3 , _A : Optional[Any]=18 , _A : Dict=30 , _A : str=400 , _A : Optional[int]=True , _A : str=None , _A : str=True , _A : str=None , _A : List[str]=True , ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : List[str] = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : List[Any] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size __SCREAMING_SNAKE_CASE : Union[str, Any] = min_resolution __SCREAMING_SNAKE_CASE : Tuple = max_resolution __SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize __SCREAMING_SNAKE_CASE : int = size __SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop __SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size __SCREAMING_SNAKE_CASE : Optional[int] = do_flip_channel_order def UpperCAmelCase__ ( self : str ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileViTImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : int ): """simple 
docstring""" __SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_flip_channel_order''' ) ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" pass def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], 
) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
131
1
def A(_lowerCamelCase):
    """Sort a list in place with odd-even transposition sort and return it.

    Alternates "even" passes (compare indices 0-1, 2-3, ...) and "odd" passes
    (compare indices 1-2, 3-4, ...); n passes over n elements guarantee a
    sorted result. O(n^2) comparisons, stable, in-place.

    Args:
        _lowerCamelCase: mutable sequence of comparable items (mutated in place).

    Returns:
        The same sequence, sorted ascending.
    """
    arr = _lowerCamelCase
    n = len(arr)
    for pass_num in range(n):
        # Even passes start at index 0, odd passes at index 1.
        for i in range(pass_num % 2, n - 1, 2):
            if arr[i + 1] < arr[i]:
                # Swap out-of-order neighbours (the original assigned both
                # targets to one temp name, so it never actually swapped).
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    _snake_case = list(range(10, 0, -1))
    print(f"Original: {list(_snake_case)}. Sorted: {A(_snake_case)}")
500
def A(_lowerCamelCase=1_000_000):
    """Return the sum of Euler's totient phi(k) for 2 <= k <= limit.

    This counts the reduced proper fractions with denominator <= limit
    (Project Euler problem 72). Uses a sieve: start each phi[k] at k - 1;
    when i is found prime (phi[i] still equals i - 1), apply the factor
    (1 - 1/i) to every multiple via ``phi[j] -= phi[j] // i``.

    Args:
        _lowerCamelCase: inclusive upper bound on the denominator (>= 2).

    Returns:
        Sum of phi(2) + phi(3) + ... + phi(limit).
    """
    limit = _lowerCamelCase
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: still holds its initial value
            # Step by the prime i (the original stepped by the limit,
            # touching at most one multiple) over 2i, 3i, ...
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(A())
500
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _a ( unittest.TestCase ): def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=18 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , ) -> int: lowerCAmelCase : Any = size if size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase : int = parent lowerCAmelCase : str = batch_size lowerCAmelCase : List[str] = num_channels lowerCAmelCase : int = image_size lowerCAmelCase : str = min_resolution lowerCAmelCase : Optional[Any] = max_resolution lowerCAmelCase : Tuple = do_resize lowerCAmelCase : Dict = size lowerCAmelCase : Dict = apply_ocr def _snake_case ( self ) -> List[str]: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _a ( snake_case_ , unittest.TestCase ): _UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _snake_case ( self ) -> Tuple: lowerCAmelCase : Any = LayoutLMvaImageProcessingTester(self ) @property def _snake_case ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def _snake_case ( self ) -> str: lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowercase_ , """size""" ) ) self.assertTrue(hasattr(lowercase_ , """apply_ocr""" ) ) def _snake_case ( self ) -> List[Any]: lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , 
{"""height""": 18, """width""": 18} ) lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def _snake_case ( self ) -> List[Any]: pass def _snake_case ( self ) -> int: # Initialize image_processing lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image ) # Test not batched input lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , lowercase_ ) self.assertIsInstance(encoding.boxes , lowercase_ ) # Test batched lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _snake_case ( self ) -> Tuple: # Initialize image_processing lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , np.ndarray ) # Test not batched input lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase : List[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _snake_case ( self ) -> Optional[int]: # Initialize image_processing lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor ) # Test not batched input lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase : Optional[Any] = image_processing(lowercase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def _snake_case ( self ) -> List[str]: # with apply_OCR = True lowerCAmelCase : List[str] = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCAmelCase : str = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) lowerCAmelCase : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) lowerCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) 
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCAmelCase : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", 
"""Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 lowerCAmelCase : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 
298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], 
[474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , lowercase_ ) self.assertListEqual(encoding.boxes , lowercase_ ) # with apply_OCR = False lowerCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=lowercase_ ) lowerCAmelCase : Dict = image_processing(lowercase_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
717
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): _UpperCamelCase: List[Any] = ["keras_nlp"] def __init__( self , *lowercase_ , **lowercase_ ) -> Tuple: requires_backends(self , ["""keras_nlp"""] )
693
0
"""Collation utilities for LUKE-style entity-aware token classification.

Pads per-example label / NER-tag / entity-span sequences to one common
length so a batch can be turned into rectangular tensors alongside the
tokenizer's own padding.
"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad or truncate every sequence to exactly ``sequence_length`` entries.

    Args:
        sequences: list of per-example sequences; elements are scalars, or
            pairs when ``padding_value`` is a tuple (e.g. ``(-1, -1)`` spans).
        padding_value: fill value; passing a tuple selects the 3-D layout
            with two values per position.
        padding_side: ``"right"`` appends the padding, anything else
            prepends it.
        sequence_length: target length of each padded sequence.

    Returns:
        Nested lists of shape ``(len(sequences), sequence_length[, 2])``.
    """
    if isinstance(padding_value, tuple):
        # Two values per position (e.g. entity start/end offsets).
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]  # truncate over-long sequences
        if padding_side == "right":
            out_tensor[i, : len(trimmed)] = trimmed
        else:
            out_tensor[i, sequence_length - len(trimmed) :] = trimmed
    return out_tensor.tolist()


def is_punctuation(char):
    """Return True if *char* should be treated as punctuation.

    The four ASCII ranges cover every non-alphanumeric printable ASCII
    character (so ``$``, ``~`` etc. count as punctuation even though
    Unicode classifies them otherwise); everything else defers to the
    Unicode ``P*`` categories.
    """
    cp = ord(char)
    if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):
        return True
    return unicodedata.category(char).startswith("P")


@dataclass
class _SCREAMING_SNAKE_CASE(DataCollatorMixin):
    """Collate features for entity-aware token classification.

    Tokenizer padding handles the text inputs; the per-entity ``labels``,
    ``ner_tags`` and ``original_entity_spans`` are padded here to the
    batch-wide entity-sequence length.

    NOTE(review): the original dataclass had every field mangled to ``_a``,
    so none of the attributes the method reads existed; field names are
    restored below.
    """

    tokenizer: PreTrainedTokenizerBase  # tokenizer whose ``pad`` is used
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100  # ignored index for the loss
    return_tensors: str = "pt"

    def UpperCAmelCase__(self, lowerCamelCase__):
        """Pad a list of feature dicts into a batch of int64 tensors."""
        import torch

        features = lowerCamelCase__
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = (
            [feature[label_name] for feature in features]
            if label_name in features[0].keys()
            else None
        )
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Defer tensor conversion until the ragged fields are padded.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        # All entity-level fields are padded to the entity-ids length.
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label))
                for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label)
                for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(
            original_entity_spans, (-1, -1), padding_side, sequence_length
        )
        # ``torch.intaa`` in the original is not a dtype; int64 is standard
        # for ids/labels.
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
200
'''Project Euler problem 1: sum of multiples of 3 or 5 below a limit.'''


def __UpperCAmelCase(lowercase__=1000) -> int:
    """Return the sum of all naturals below *lowercase__* divisible by 3 or 5.

    Starting the range at 3 is safe: 1 and 2 are not multiples of 3 or 5,
    and 0 would contribute nothing.
    """
    return sum(e for e in range(3, lowercase__) if e % 3 == 0 or e % 5 == 0)


# Public alias: the CLI below (and external callers) expect ``solution``,
# which the original never defined, so it raised NameError when run.
solution = __UpperCAmelCase


if __name__ == "__main__":
    print(f"{solution() = }")
685
0
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html A__ = """platform""" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def _lowerCamelCase ( a_ : str , a_ : Union[str, Any] , a_ : List[str]=None , a_ : Optional[Any]=None , a_ : List[str]=None , a_ : List[str]=None , a_ : Optional[Any]=None , a_ : int=None , ): if attention_mask is None: lowerCamelCase :Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0) if decoder_attention_mask is None: lowerCamelCase :Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0) if head_mask is None: lowerCamelCase :List[Any] = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: lowerCamelCase :List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: lowerCamelCase :Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class _lowerCAmelCase : def __init__( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any]=13 , __snake_case : Any=7 , __snake_case : List[str]=True , 
__snake_case : Any=False , __snake_case : Optional[int]=99 , __snake_case : List[Any]=16 , __snake_case : str=2 , __snake_case : List[Any]=4 , __snake_case : List[Any]=4 , __snake_case : Tuple="gelu" , __snake_case : str=0.1 , __snake_case : str=0.1 , __snake_case : Optional[Any]=32 , __snake_case : int=2 , __snake_case : Optional[int]=1 , __snake_case : str=0 , __snake_case : str=0.0_2 , ): lowerCamelCase :Optional[Any] = parent lowerCamelCase :Any = batch_size lowerCamelCase :str = seq_length lowerCamelCase :Optional[Any] = is_training lowerCamelCase :Tuple = use_labels lowerCamelCase :List[str] = vocab_size lowerCamelCase :Any = hidden_size lowerCamelCase :str = num_hidden_layers lowerCamelCase :Any = num_attention_heads lowerCamelCase :Tuple = intermediate_size lowerCamelCase :List[Any] = hidden_act lowerCamelCase :Tuple = hidden_dropout_prob lowerCamelCase :Dict = attention_probs_dropout_prob lowerCamelCase :List[Any] = max_position_embeddings lowerCamelCase :List[str] = eos_token_id lowerCamelCase :Tuple = pad_token_id lowerCamelCase :Tuple = bos_token_id lowerCamelCase :int = initializer_range def snake_case ( self : str ): lowerCamelCase :str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCamelCase :Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowerCamelCase :List[Any] = shift_tokens_right(__snake_case , 1 , 2 ) lowerCamelCase :Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__snake_case , ) lowerCamelCase :Dict = prepare_blenderbot_inputs_dict(__snake_case , __snake_case , __snake_case ) return config, inputs_dict def snake_case ( self : Optional[Any] ): lowerCamelCase , lowerCamelCase :Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def snake_case ( self : List[str] , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Union[str, Any] ): lowerCamelCase :Tuple = 20 lowerCamelCase :Union[str, Any] = model_class_name(__snake_case ) lowerCamelCase :Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCamelCase , lowerCamelCase :Dict = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCamelCase :Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case ) lowerCamelCase :Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCamelCase :int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase :Optional[Any] = model.decode( decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , ) lowerCamelCase :Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCamelCase :str = model.decode( decoder_input_ids[:, -1:] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__snake_case , ) lowerCamelCase :Optional[int] = model.decode(__snake_case , __snake_case ) lowerCamelCase :Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is 
{diff}" ) def snake_case ( self : Optional[int] , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Tuple ): lowerCamelCase :List[str] = 20 lowerCamelCase :Union[str, Any] = model_class_name(__snake_case ) lowerCamelCase :Any = model.encode(inputs_dict['''input_ids'''] ) lowerCamelCase , lowerCamelCase :Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCamelCase :str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCamelCase :Optional[int] = model.init_cache(decoder_input_ids.shape[0] , __snake_case , __snake_case ) lowerCamelCase :Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase :int = model.decode( decoder_input_ids[:, :-1] , __snake_case , decoder_attention_mask=__snake_case , past_key_values=__snake_case , decoder_position_ids=__snake_case , ) lowerCamelCase :Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCamelCase :int = model.decode( decoder_input_ids[:, -1:] , __snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__snake_case , decoder_position_ids=__snake_case , ) lowerCamelCase :str = model.decode(__snake_case , __snake_case , decoder_attention_mask=__snake_case ) lowerCamelCase :Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" ) @require_flax class _lowerCAmelCase ( unittest.TestCase ): _UpperCAmelCase = 9_9 def snake_case ( self : Optional[Any] ): lowerCamelCase :Dict = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note 
padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowerCamelCase :Union[str, Any] = input_ids.shape[0] lowerCamelCase :Any = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def snake_case ( self : List[str] ): lowerCamelCase , lowerCamelCase , lowerCamelCase :List[Any] = self._get_config_and_data() lowerCamelCase :str = FlaxBlenderbotSmallForConditionalGeneration(__snake_case ) lowerCamelCase :Any = lm_model(input_ids=__snake_case ) lowerCamelCase :Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , __snake_case ) def snake_case ( self : Optional[Any] ): lowerCamelCase :int = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowerCamelCase :Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(__snake_case ) lowerCamelCase :List[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) lowerCamelCase :List[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) lowerCamelCase :List[str] = lm_model(input_ids=__snake_case , decoder_input_ids=__snake_case ) lowerCamelCase :Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , __snake_case ) def snake_case ( self : Dict ): lowerCamelCase :Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) 
lowerCamelCase :Union[str, Any] = shift_tokens_right(__snake_case , 1 , 2 ) lowerCamelCase :Dict = np.equal(__snake_case , 1 ).astype(np.floataa ).sum() lowerCamelCase :str = np.equal(__snake_case , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(__snake_case , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase , __SCREAMING_SNAKE_CASE ): _UpperCAmelCase = True _UpperCAmelCase = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) _UpperCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def snake_case ( self : int ): lowerCamelCase :Optional[int] = FlaxBlenderbotSmallModelTester(self ) def snake_case ( self : Union[str, Any] ): lowerCamelCase , lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__snake_case , __snake_case , __snake_case ) def snake_case ( self : str ): lowerCamelCase , lowerCamelCase :Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__snake_case , __snake_case , __snake_case ) def snake_case ( self : Union[str, Any] ): lowerCamelCase , lowerCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase :Tuple = self._prepare_for_class(__snake_case , __snake_case ) lowerCamelCase :Optional[int] = model_class(__snake_case ) @jax.jit def encode_jitted(__snake_case : Tuple , __snake_case : Optional[int]=None , **__snake_case : Optional[Any] ): return model.encode(input_ids=__snake_case , attention_mask=__snake_case ) with self.subTest('''JIT Enabled''' ): lowerCamelCase :Optional[int] = 
encode_jitted(**__snake_case ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCamelCase :Any = encode_jitted(**__snake_case ).to_tuple() self.assertEqual(len(__snake_case ) , len(__snake_case ) ) for jitted_output, output in zip(__snake_case , __snake_case ): self.assertEqual(jitted_output.shape , output.shape ) def snake_case ( self : int ): lowerCamelCase , lowerCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase :Tuple = model_class(__snake_case ) lowerCamelCase :Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCamelCase :Tuple = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(__snake_case : Any , __snake_case : Any , __snake_case : Union[str, Any] ): return model.decode( decoder_input_ids=__snake_case , decoder_attention_mask=__snake_case , encoder_outputs=__snake_case , ) with self.subTest('''JIT Enabled''' ): lowerCamelCase :Tuple = decode_jitted(**__snake_case ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCamelCase :Optional[Any] = decode_jitted(**__snake_case ).to_tuple() self.assertEqual(len(__snake_case ) , len(__snake_case ) ) for jitted_output, output in zip(__snake_case , __snake_case ): self.assertEqual(jitted_output.shape , output.shape ) @slow def snake_case ( self : List[str] ): for model_class_name in self.all_model_classes: lowerCamelCase :Optional[Any] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCamelCase :Optional[int] = np.ones((1, 1) ) * model.config.eos_token_id lowerCamelCase :Union[str, Any] = model(__snake_case ) 
self.assertIsNotNone(__snake_case )
49
"""Evaluate a space-separated postfix (Reverse Polish) expression.

Prints a step-by-step trace table of the evaluation stack, then returns
the final integer result.
"""
import operator as op


def solve(post_fix):
    """Evaluate *post_fix* (a list of operand/operator tokens) and return an int.

    Supported operators: ^ * / + -  ('/' truncates the float quotient via int()).
    Raises KeyError for an unknown operator token and IndexError when the
    expression is malformed (too few operands).
    """
    stack = []

    def div(x, y):
        # integer division operation used for the '/' operator
        return int(x / y)

    # operators & their respective operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # operand: push it
            stack.append(x)
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop the right operand
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop the left operand
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
49
1
"""Check whether a string can be rearranged into a palindrome.

A string is a palindrome permutation iff at most one character occurs an
odd number of times (spaces ignored, case-insensitive).
"""
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Counter-based check: True iff at most one character count is odd."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Hand-rolled frequency-dict version of the same check."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Print the answer and timing of both implementations for *input_str*.

    NOTE: the timeit setup imports __main__, so this only measures correctly
    when the module is run as a script.
    """
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
71
"""simple docstring""" def A ( snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = len(snake_case__ ) SCREAMING_SNAKE_CASE__ = len(snake_case__ ) SCREAMING_SNAKE_CASE__ = ( first_str_length if first_str_length > second_str_length else second_str_length ) SCREAMING_SNAKE_CASE__ = [] for char_count in range(snake_case__ ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(snake_case__ ) if __name__ == "__main__": print(alternative_string_arrange("AB", "XYZ"), end=" ")
196
0
"""Tests for the DDIM unconditional image-generation pipeline."""
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the obfuscated source lost this attribute's name; `False`
    # corresponds to `test_cpu_offload` in the upstream diffusers test — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet + DDIM scheduler pair for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; mps needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
703
"""Tokenization class for BlenderbotSmall (BPE over a fixed vocab/merges pair)."""
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for the BlenderbotSmall checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is the "#version" header; last split element is empty
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply byte-pair merges to *token* and return the '@@ '-joined pieces."""
        if token in self.cache:
            return self.cache[token]
        # normalize punctuation/apostrophes/whitespace before splitting
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # mark the end-of-word symbol so merges can distinguish suffixes
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # lowest-ranked (earliest learned) bigram is merged first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]  # strip the trailing "</w>" marker
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split *text* into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its vocabulary id (unk id when absent)."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join sub-word tokens and drop the '@@ ' continuation markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        # NOTE(review): sort_keys/ensure_ascii literals were lost by the
        # obfuscation; True/False matches the upstream implementation.
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file


# Backward-compatible alias for the previous (obfuscated) class name.
_snake_case = BlenderbotSmallTokenizer
323
0
"""Fast tokenizer for SqueezeBERT, backed by the HuggingFace *tokenizers* library."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """WordPiece-based fast tokenizer for SqueezeBERT checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its serialized options disagree
        # with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


# Backward-compatible alias for the previous (obfuscated) class name.
__A = SqueezeBertTokenizerFast
157
"""Configuration for BertGeneration (BERT adapted for sequence generation)."""
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Stores the hyper-parameters of a BertGeneration model.

    Defaults describe a 24-layer, 1024-hidden, 16-head encoder.
    """

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache


# Backward-compatible alias for the previous (obfuscated) class name.
__A = BertGenerationConfig
157
1
'''simple docstring''' from __future__ import annotations import csv import requests from bsa import BeautifulSoup def UpperCamelCase ( lowercase_ : str = "" ) -> dict[str, float]: '''simple docstring''' lowercase =url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' lowercase =BeautifulSoup(requests.get(lowercase_ ).text , '''html.parser''' ) lowercase =soup.find_all('''td''' , attrs='''titleColumn''' ) lowercase =soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(lowercase_ , lowercase_ ) } def UpperCamelCase ( lowercase_ : str = "IMDb_Top_250_Movies.csv" ) -> None: '''simple docstring''' lowercase =get_imdb_top_aaa_movies() with open(lowercase_ , '''w''' , newline='''''' ) as out_file: lowercase =csv.writer(lowercase_ ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
710
'''simple docstring''' from __future__ import annotations import math class __magic_name__ : def __init__( self , snake_case_ ): lowercase =size # approximate the overall size of segment tree with given value lowercase =[0 for i in range(0 , 4 * size )] # create array to store lazy update lowercase =[0 for i in range(0 , 4 * size )] lowercase =[0 for i in range(0 , 4 * size )] # flag for lazy update def _A( self , snake_case_ ): return idx * 2 def _A( self , snake_case_ ): return idx * 2 + 1 def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): if left_element == right_element: lowercase =a[left_element - 1] else: lowercase =(left_element + right_element) // 2 self.build(self.left(snake_case_ ) , snake_case_ , snake_case_ , snake_case_ ) self.build(self.right(snake_case_ ) , mid + 1 , snake_case_ , snake_case_ ) lowercase =max( self.segment_tree[self.left(snake_case_ )] , self.segment_tree[self.right(snake_case_ )] ) def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): if self.flag[idx] is True: lowercase =self.lazy[idx] lowercase =False if left_element != right_element: lowercase =self.lazy[idx] lowercase =self.lazy[idx] lowercase =True lowercase =True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowercase =val if left_element != right_element: lowercase =val lowercase =val lowercase =True lowercase =True return True lowercase =(left_element + right_element) // 2 self.update(self.left(snake_case_ ) , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) self.update(self.right(snake_case_ ) , mid + 1 , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) lowercase =max( self.segment_tree[self.left(snake_case_ )] , self.segment_tree[self.right(snake_case_ )] ) return True def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): if self.flag[idx] is True: lowercase =self.lazy[idx] lowercase =False if 
left_element != right_element: lowercase =self.lazy[idx] lowercase =self.lazy[idx] lowercase =True lowercase =True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowercase =(left_element + right_element) // 2 lowercase =self.query(self.left(snake_case_ ) , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) lowercase =self.query(self.right(snake_case_ ) , mid + 1 , snake_case_ , snake_case_ , snake_case_ ) return max(snake_case_ , snake_case_ ) def __str__( self ): return str([self.query(1 , 1 , self.size , snake_case_ , snake_case_ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _UpperCAmelCase : List[str] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _UpperCAmelCase : int = 15 _UpperCAmelCase : int = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 1_11) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 2_35) print(segt)
145
0
"""Next Greatest Element (NGE) of every array element, three implementations."""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) brute force: scan right of each index for the first larger value."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version but iterates with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution, scanning from the right."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # drop stack entries that cannot be the NGE of arr[index]
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
117
"""Rayleigh quotient of a Hermitian matrix: R(M, v) = v* M v / (v* v)."""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient of Hermitian matrix *a* and column vector *v*."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Spot-check both helpers on a complex and a real Hermitian matrix."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
117
1
"""Median of the union of two numeric arrays."""
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined, sorted contents of both inputs.

    Raises IndexError when both inputs are empty.
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        # odd total length: middle element
        return all_numbers[div]
    # even total length: mean of the two middle elements
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
188
"""Backend-switchable parallel map (multiprocessing pool or joblib)."""
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    # Name of the joblib backend to use; None selects the multiprocessing path.
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map *single_map_nested_func* over *iterable* using the configured backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split *iterable* into contiguous chunks and map them with a process pool."""
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []
    # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # share tqdm's lock so progress bars from child processes don't interleave
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager selecting the joblib backend used by `parallel_map`."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # always restore the default backend, even on error
        ParallelBackendConfig.backend_name = None
188
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Base import structure; model entries are appended below only when their
# backend (torch / tf / flax) is installed.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size SCREAMING_SNAKE_CASE__ : str = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = num_labels SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : str = len(a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels ) 
SCREAMING_SNAKE_CASE__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase( self : str )-> str: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowercase_ = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : int )-> Union[str, 
Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" pass def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ): SCREAMING_SNAKE_CASE__ : Any = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if 
config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ : List[Any] = layer_type SCREAMING_SNAKE_CASE__ : Union[str, Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ): SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple() def recursive_check(a_ : List[Any] , a_ : int ): if isinstance(a_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ): recursive_check(a_ , a_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(a_ , a_ ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(a_ , a_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) def __lowercase( self : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Any )-> List[str]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> int: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Tuple: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : Any = prepare_img() SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
85
1
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[str] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=False ) -> Tuple: lowerCamelCase__ : Union[str, Any] = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): lowerCamelCase__ : Tuple = 'segformer.encoder.' + key if key.startswith('backbone' ): lowerCamelCase__ : Dict = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCamelCase__ : Any = key[key.find('patch_embed' ) + len('patch_embed' )] lowerCamelCase__ : List[str] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_UpperCAmelCase )-1}""" ) if "norm" in key: lowerCamelCase__ : Optional[int] = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCamelCase__ : Tuple = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] lowerCamelCase__ : Dict = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_UpperCAmelCase )-1}""" ) if "layer_norm1" in key: lowerCamelCase__ : List[Any] = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowerCamelCase__ : List[str] = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowerCamelCase__ : Optional[int] = key[key.find('block' ) + len('block' )] lowerCamelCase__ : Any = key.replace(F"""block{idx}""" , F"""block.{int(_UpperCAmelCase )-1}""" ) if "attn.q" in key: lowerCamelCase__ : Tuple = 
key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowerCamelCase__ : Tuple = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowerCamelCase__ : Tuple = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowerCamelCase__ : List[Any] = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowerCamelCase__ : Union[str, Any] = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowerCamelCase__ : Optional[Any] = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowerCamelCase__ : Union[str, Any] = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowerCamelCase__ : List[Any] = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCamelCase__ : Any = key[key.find('linear_c' ) + len('linear_c' )] lowerCamelCase__ : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_UpperCAmelCase )-1}""" ) if key.startswith('head' ): lowerCamelCase__ : int = key.replace('head' , 'classifier' ) lowerCamelCase__ : Any = value return new_state_dict def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCamelCase__ : int = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" ) lowerCamelCase__ : List[str] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" ) # next, add keys and values (in that order) to the state dict lowerCamelCase__ : Any = kv_weight[ : config.hidden_sizes[i], : ] lowerCamelCase__ : List[str] = kv_bias[: config.hidden_sizes[i]] lowerCamelCase__ : Optional[int] = kv_weight[ config.hidden_sizes[i] :, : ] lowerCamelCase__ : List[Any] = kv_bias[ config.hidden_sizes[i] : ] def SCREAMING_SNAKE_CASE ( ) -> List[str]: lowerCamelCase__ : 
Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowerCamelCase__ : Union[str, Any] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) return image @torch.no_grad() def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]: lowerCamelCase__ : List[Any] = SegformerConfig() lowerCamelCase__ : List[Any] = False # set attributes based on model_name lowerCamelCase__ : Optional[int] = 'huggingface/label-files' if "segformer" in model_name: lowerCamelCase__ : List[Any] = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: lowerCamelCase__ : Tuple = 150 lowerCamelCase__ : Any = 'ade20k-id2label.json' lowerCamelCase__ : List[Any] = (1, 150, 128, 128) elif "city" in model_name: lowerCamelCase__ : List[str] = 19 lowerCamelCase__ : Optional[Any] = 'cityscapes-id2label.json' lowerCamelCase__ : str = (1, 19, 128, 128) else: raise ValueError(F"""Model {model_name} not supported""" ) elif "mit" in model_name: lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : str = model_name[4:6] lowerCamelCase__ : str = 1000 lowerCamelCase__ : str = 'imagenet-1k-id2label.json' lowerCamelCase__ : Tuple = (1, 1000) else: raise ValueError(F"""Model {model_name} not supported""" ) # set config attributes lowerCamelCase__ : int = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) lowerCamelCase__ : int = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} lowerCamelCase__ : List[str] = idalabel lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": lowerCamelCase__ : int = [64, 128, 320, 512] lowerCamelCase__ : Optional[Any] = 256 elif size == "b2": lowerCamelCase__ : Any = [64, 128, 320, 512] lowerCamelCase__ : List[Any] = 768 lowerCamelCase__ : Optional[Any] = [3, 4, 6, 3] elif size == "b3": lowerCamelCase__ : Optional[int] = [64, 128, 320, 512] lowerCamelCase__ : 
Optional[Any] = 768 lowerCamelCase__ : Optional[int] = [3, 4, 18, 3] elif size == "b4": lowerCamelCase__ : Any = [64, 128, 320, 512] lowerCamelCase__ : List[Any] = 768 lowerCamelCase__ : List[str] = [3, 8, 27, 3] elif size == "b5": lowerCamelCase__ : Union[str, Any] = [64, 128, 320, 512] lowerCamelCase__ : Dict = 768 lowerCamelCase__ : str = [3, 6, 40, 3] else: raise ValueError(F"""Size {size} not supported""" ) # load image processor (only resize + normalize) lowerCamelCase__ : Tuple = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase ) # prepare image lowerCamelCase__ : int = prepare_img() lowerCamelCase__ : Optional[Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values logger.info(F"""Converting model {model_name}...""" ) # load original state dict if encoder_only: lowerCamelCase__ : Dict = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) ) else: lowerCamelCase__ : List[str] = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )['state_dict'] # rename keys lowerCamelCase__ : Optional[int] = rename_keys(_UpperCAmelCase , encoder_only=_UpperCAmelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_UpperCAmelCase , _UpperCAmelCase ) # create HuggingFace model and load state dict if encoder_only: lowerCamelCase__ : Optional[int] = False lowerCamelCase__ : List[Any] = SegformerForImageClassification(_UpperCAmelCase ) else: lowerCamelCase__ : Tuple = SegformerForSemanticSegmentation(_UpperCAmelCase ) model.load_state_dict(_UpperCAmelCase ) model.eval() # forward pass lowerCamelCase__ : List[str] = model(_UpperCAmelCase ) lowerCamelCase__ : Tuple = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": lowerCamelCase__ : List[Any] = 
torch.tensor( [ [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]], [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]], [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": lowerCamelCase__ : List[str] = torch.tensor( [ [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]], [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]], [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": lowerCamelCase__ : Optional[Any] = torch.tensor( [ [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]], [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]], [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": lowerCamelCase__ : Dict = torch.tensor( [ [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]], [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]], [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": lowerCamelCase__ : Optional[int] = torch.tensor( [ [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]], [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]], [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, 
-21.2_641, -23.6_944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": lowerCamelCase__ : int = torch.tensor( [ [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]], [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]], [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": lowerCamelCase__ : Optional[Any] = torch.tensor( [ [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]], [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]], [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": lowerCamelCase__ : Optional[int] = torch.tensor( [ [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]], [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]], [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": lowerCamelCase__ : Dict = torch.tensor( [ [ [-1.1_372e01, -1.2_787e01, -1.3_477e01], [-1.2_536e01, -1.4_194e01, -1.4_409e01], [-1.3_217e01, -1.4_888e01, -1.5_327e01], ], [ [-1.4_791e01, -1.7_122e01, -1.8_277e01], [-1.7_163e01, -1.9_192e01, -1.9_533e01], [-1.7_897e01, -1.9_991e01, -2.0_315e01], ], [ [7.6_723e-01, 4.1_921e-01, -7.7_878e-02], [4.7_772e-01, 9.5_557e-03, -2.8_082e-01], [3.6_032e-01, -2.4_826e-01, -5.1_168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": lowerCamelCase__ : List[Any] = torch.tensor( [ [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]], 
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]], [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": lowerCamelCase__ : Union[str, Any] = torch.tensor( [ [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]], [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]], [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": lowerCamelCase__ : Dict = torch.tensor( [ [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]], [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]], [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": lowerCamelCase__ : Optional[Any] = torch.tensor( [ [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]], [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]], [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": lowerCamelCase__ : Optional[Any] = torch.tensor( [ [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]], [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]], [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": lowerCamelCase__ : Optional[Any] = torch.tensor( [ [[-12.5_641, 
-13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]], [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]], [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]], ] ) else: lowerCamelCase__ : str = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": _UpperCAmelCase : int = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _UpperCAmelCase : int = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
188
# NOTE(review): this module's identifiers appear machine-mangled — classes are
# all named ``lowerCAmelCase``, every parameter is named ``UpperCAmelCase``
# (duplicate argument names are a SyntaxError in Python), and method bodies
# read names (``parent``, ``config_and_inputs``, ``model`` …) that are never
# bound.  Original names were lost; the comments below describe the apparent
# intent only.  Confirm against the upstream FlaxRegNet test file.

import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class lowerCAmelCase ( unittest.TestCase ):
    """Helper that builds a small RegNet config plus random pixel inputs for the tests below."""

    # NOTE(review): every parameter below is named ``UpperCAmelCase`` —
    # duplicate argument names (SyntaxError) — and the body reads
    # ``parent``/``batch_size``/… which are never bound.
    def __init__( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : str=32 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Dict=10 , UpperCAmelCase : List[str]=[10, 20, 30, 40] , UpperCAmelCase : Any=[1, 1, 2, 1] , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict="relu" , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Dict=None , ) -> Optional[Any]:
        lowerCamelCase__ : Union[str, Any] = parent
        lowerCamelCase__ : Optional[Any] = batch_size
        lowerCamelCase__ : Dict = image_size
        lowerCamelCase__ : int = num_channels
        lowerCamelCase__ : int = embeddings_size
        lowerCamelCase__ : str = hidden_sizes
        lowerCamelCase__ : Any = depths
        lowerCamelCase__ : str = is_training
        lowerCamelCase__ : List[Any] = use_labels
        lowerCamelCase__ : Union[str, Any] = hidden_act
        lowerCamelCase__ : Dict = num_labels
        lowerCamelCase__ : Dict = scope
        lowerCamelCase__ : List[str] = len(UpperCAmelCase )

    def A_ ( self : Optional[int] ) -> Optional[int]:
        # Build a (batch, channels, H, W) float tensor plus a config.
        lowerCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : str = self.get_config()
        return config, pixel_values

    def A_ ( self : Optional[int] ) -> Dict:
        # Assemble a RegNetConfig from the tester's attributes.
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def A_ ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ) -> Optional[Any]:
        # Forward pass through the bare model; check the feature-map shape.
        lowerCamelCase__ : Optional[Any] = FlaxRegNetModel(config=UpperCAmelCase )
        lowerCamelCase__ : Optional[Any] = model(UpperCAmelCase )

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A_ ( self : str , UpperCAmelCase : int , UpperCAmelCase : Tuple ) -> Tuple:
        # Forward pass through the classification head; check the logits shape.
        lowerCamelCase__ : List[str] = self.num_labels
        lowerCamelCase__ : str = FlaxRegNetForImageClassification(config=UpperCAmelCase )
        lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A_ ( self : Dict ) -> str:
        # Pack (config, {"pixel_values": ...}) for the common test mixin.
        lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ : Any = config_and_inputs
        lowerCamelCase__ : Dict = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_flax
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
    """Common-suite tests for the Flax RegNet models.

    NOTE(review): the base ``__UpperCamelCase`` is never defined in this file
    (presumably the model-tester mixin) — confirm against the original.
    """

    UpperCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def A_ ( self : List[Any] ) -> None:
        # setUp-style hook: build the model tester and the config tester.
        lowerCamelCase__ : List[Any] = FlaxRegNetModelTester(self )
        lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )

    def A_ ( self : int ) -> Tuple:
        # Run the full battery of config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A_ ( self : Any ) -> str:
        return

    def A_ ( self : Optional[Any] ) -> Optional[Any]:
        lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    def A_ ( self : Dict ) -> Optional[Any]:
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )

    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def A_ ( self : Dict ) -> Dict:
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def A_ ( self : Any ) -> Tuple:
        pass

    def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
        # The first positional argument of every model's __call__ must be pixel_values.
        lowerCamelCase__ , lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ : Optional[int] = model_class(UpperCAmelCase )
            lowerCamelCase__ : Optional[Any] = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : List[Any] = [*signature.parameters.keys()]

            lowerCamelCase__ : int = ['pixel_values']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )

    def A_ ( self : int ) -> List[str]:
        # One hidden state per stage, plus the stem output.
        def check_hidden_states_output(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ):
            lowerCamelCase__ : Any = model_class(UpperCAmelCase )
            lowerCamelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )

            lowerCamelCase__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            lowerCamelCase__ : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )

        lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase__ : List[str] = True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase__ : List[Any] = True

            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )

    def A_ ( self : Dict ) -> int:
        # JIT-compiled and eager forward passes must produce identically-shaped outputs.
        lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase__ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
                lowerCamelCase__ : Tuple = model_class(UpperCAmelCase )

                @jax.jit
                def model_jitted(UpperCAmelCase : Tuple , **UpperCAmelCase : Tuple ):
                    return model(pixel_values=UpperCAmelCase , **UpperCAmelCase )

                with self.subTest('JIT Enabled' ):
                    lowerCamelCase__ : Dict = model_jitted(**UpperCAmelCase ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        lowerCamelCase__ : Optional[int] = model_jitted(**UpperCAmelCase ).to_tuple()

                self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
                for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )


def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    # Load the standard COCO cats fixture image used by the integration test.
    lowerCamelCase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_flax
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test against the pretrained facebook/regnet-y-040 checkpoint."""

    @cached_property
    def A_ ( self : Any ) -> List[Any]:
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None

    @slow
    def A_ ( self : Dict ) -> Tuple:
        lowerCamelCase__ : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )

        lowerCamelCase__ : Dict = self.default_image_processor
        lowerCamelCase__ : List[Any] = prepare_img()
        lowerCamelCase__ : List[str] = image_processor(images=UpperCAmelCase , return_tensors='np' )

        lowerCamelCase__ : List[Any] = model(**UpperCAmelCase )

        # verify the logits
        lowerCamelCase__ : Union[str, Any] = (1, 1000)
        self.assertEqual(outputs.logits.shape , UpperCAmelCase )

        lowerCamelCase__ : Tuple = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
188
1
"""Smoke tests that run Accelerate's debug launcher over the bundled test scripts."""

import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """CPU-only checks that the debug launcher can execute each script's ``main()``."""

    # The broken ``self : Dict`` / ``self : Union[str, Any]`` annotations were
    # removed: ``Dict``/``Union`` are never imported here, so evaluating them at
    # class-body time would raise NameError.
    def A ( self ):
        """Launch the basic test script through the debug launcher."""
        debug_launcher(test_script.main )

    # NOTE(review): this method has the same (mangled) name ``A`` as the one
    # above, so it shadows it and only this one survives on the class — likely
    # a renaming artifact; confirm the intended test names upstream.
    def A ( self ):
        """Launch the ops test script through the debug launcher."""
        debug_launcher(test_ops.main )
430
"""Convert a decimal integer (given as ``int`` or ``str``) to a binary literal.

The public wrapper validates its input and formats the result like a Python
binary literal, e.g. ``"10" -> "0b1010"`` and ``"-10" -> "-0b1010"``.
"""


def binary_recursive(decimal: int) -> str:
    """Return the binary digits of non-negative ``decimal`` as a string.

    >>> binary_recursive(0)
    '0'
    >>> binary_recursive(10)
    '1010'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    # Recurse on the quotient; the remainder is the current lowest bit.
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def __lowerCamelCase(number) -> str:
    """Return ``number`` formatted as a binary literal string ``[-]0b...``.

    Args:
        number: an integer, or a string holding a (possibly negative) integer.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    # EAFP: let int() decide.  The previous .isnumeric() check accepted
    # characters such as unicode superscripts that int() rejects, so the
    # conversion below could still blow up with an unhandled ValueError.
    try:
        value = int(number)
    except ValueError:
        raise ValueError('Input value is not an integer') from None
    return f"{negative}0b{binary_recursive(value)}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
430
1
# NOTE(review): identifiers in this module are machine-mangled — the three
# class attributes are all assigned to the same name ``A__`` (only the last
# survives), every parameter is ``__UpperCAmelCase``, all delegating methods
# are named ``__magic_name__`` (each shadows the previous), and method bodies
# read names (``kwargs``, ``image_processor``, ``text`` …) that are never
# bound.  The comments below describe the apparent intent (an OwlViT
# processor combining a CLIP tokenizer with an OwlViT image processor).

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class _lowerCAmelCase ( _lowercase ):
    """Processor wrapping an OwlViT image processor and a CLIP tokenizer."""

    A__ = ['image_processor', 'tokenizer']
    A__ = 'OwlViTImageProcessor'
    A__ = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
        # Back-compat shim: accept the deprecated ``feature_extractor`` kwarg
        # as an alias for ``image_processor``.
        lowerCAmelCase__ : Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.''' , __UpperCAmelCase , )
            lowerCAmelCase__ : str = kwargs.pop('''feature_extractor''' )

        lowerCAmelCase__ : List[str] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(__UpperCAmelCase , __UpperCAmelCase )

    def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ):
        # Tokenize text queries and/or preprocess (query) images into one
        # BatchEncoding.  At least one of text / query_images / images must
        # be provided.
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )

        if text is not None:
            # Single string or flat list of strings: one tokenizer call.
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
                lowerCAmelCase__ : int = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]

            # Nested list: tokenize per sample, padding each sample's query
            # list to the batch-wide maximum number of queries.
            elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
                lowerCAmelCase__ : Optional[int] = []

                # Maximum number of queries across batch
                lowerCAmelCase__ : List[Any] = max([len(__UpperCAmelCase ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(__UpperCAmelCase ) != max_num_queries:
                        lowerCAmelCase__ : Dict = t + [''' '''] * (max_num_queries - len(__UpperCAmelCase ))

                    lowerCAmelCase__ : str = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
                    encodings.append(__UpperCAmelCase )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )

            # Concatenate the per-sample encodings along the batch axis in
            # whichever tensor framework was requested.
            if return_tensors == "np":
                lowerCAmelCase__ : int = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                lowerCAmelCase__ : Tuple = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                lowerCAmelCase__ : Optional[Any] = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                lowerCAmelCase__ : Union[str, Any] = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                lowerCAmelCase__ : Tuple = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                lowerCAmelCase__ : Optional[int] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                lowerCAmelCase__ : Optional[Any] = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                lowerCAmelCase__ : Optional[int] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )

            else:
                raise ValueError('''Target return tensor type could not be returned''' )

            lowerCAmelCase__ : Any = BatchEncoding()
            lowerCAmelCase__ : Any = input_ids
            lowerCAmelCase__ : Tuple = attention_mask

        if query_images is not None:
            # Image-guided detection: preprocess the query images separately.
            lowerCAmelCase__ : List[Any] = BatchEncoding()
            lowerCAmelCase__ : Union[str, Any] = self.image_processor(
                __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
            lowerCAmelCase__ : Optional[Any] = query_pixel_values

        if images is not None:
            lowerCAmelCase__ : List[str] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )

        # Merge image pixel values into the encoding when both modalities exist.
        if text is not None and images is not None:
            lowerCAmelCase__ : Tuple = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            lowerCAmelCase__ : int = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )

    # The five methods below are thin delegations to the image processor /
    # tokenizer.  NOTE(review): all are named ``__magic_name__`` so only the
    # last definition survives on the class — a mangling artifact.
    def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )

    def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )

    def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )

    def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )

    def __magic_name__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )

    @property
    def __magic_name__( self ):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCAmelCase , )
        return self.image_processor_class

    @property
    def __magic_name__( self ):
        # Deprecated alias for image_processor.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCAmelCase , )
        return self.image_processor
470
"""Session-scoped pytest fixtures that materialize small sample datasets in
many on-disk formats (txt/csv/json/jsonl/parquet/sqlite/arrow plus assorted
compressed and archived variants) for the `datasets` test suite.

NOTE(review): this module is machine-mangled — every fixture is named
``__lowerCAmelCase`` (each shadows the previous), fixtures with two or more
parameters repeat the name ``UpperCamelCase`` (duplicate argument names are a
SyntaxError), the DATA/FILE_CONTENT constants are all assigned to
``lowerCAmelCase_``, and several module names look corrupted (``sqlitea`` for
``sqlite3``, ``bza`` for ``bz2``, ``lza`` for ``lz4``, ``pyazr`` for
``py7zr``, ``pa.intaa``/``pa.floataa`` for ``int64``/``float64``).  Code is
transcribed as-is; only comments were added.
"""

import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( ) -> Optional[Any]:
    # In-memory Dataset with tokens / labels / answers / id columns.
    lowerCAmelCase__ : List[str] = 10
    lowerCAmelCase__ : Optional[Any] = datasets.Features(
        {
            '''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
            '''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
            '''answers''': datasets.Sequence(
                {
                    '''text''': datasets.Value('''string''' ),
                    '''answer_start''': datasets.Value('''int32''' ),
                } ),
            '''id''': datasets.Value('''int64''' ),
        } )
    lowerCAmelCase__ : List[Any] = datasets.Dataset.from_dict(
        {
            '''tokens''': [['''foo'''] * 5] * n,
            '''labels''': [[1] * 5] * n,
            '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
            '''id''': list(range(UpperCamelCase ) ),
        } , features=UpperCamelCase , )
    return dataset


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> List[Any]:
    # Arrow cache file produced by mapping the dataset fixture.
    lowerCAmelCase__ : str = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
    dataset.map(cache_file_name=UpperCamelCase )
    return filename


# FILE_CONTENT + files
lowerCAmelCase_ = """\
Text data.
Second line of data."""


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Optional[int]:
    # Plain text file.
    lowerCAmelCase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
    lowerCAmelCase__ : int = FILE_CONTENT
    with open(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase )
    return filename


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Optional[Any]:
    # bzip2-compressed text file.
    import bza

    lowerCAmelCase__ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
    lowerCAmelCase__ : Optional[Any] = bytes(UpperCamelCase , '''utf-8''' )
    with bza.open(UpperCamelCase , '''wb''' ) as f:
        f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> List[Any]:
    # gzip-compressed text file.
    import gzip

    lowerCAmelCase__ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
    lowerCAmelCase__ : Any = bytes(UpperCamelCase , '''utf-8''' )
    with gzip.open(UpperCamelCase , '''wb''' ) as f:
        f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Tuple:
    # lz4-compressed text file (only when the optional dependency is present).
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame

        lowerCAmelCase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
        lowerCAmelCase__ : Tuple = bytes(UpperCamelCase , '''utf-8''' )
        with lza.frame.open(UpperCamelCase , '''wb''' ) as f:
            f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    # 7z archive containing the text file (optional dependency).
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr

        lowerCAmelCase__ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
        with pyazr.SevenZipFile(UpperCamelCase , '''w''' ) as archive:
            archive.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
    # tar archive containing the text file.
    import tarfile

    lowerCAmelCase__ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
    with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
        f.add(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Optional[int]:
    # xz/LZMA-compressed text file.
    import lzma

    lowerCAmelCase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
    lowerCAmelCase__ : str = bytes(UpperCamelCase , '''utf-8''' )
    with lzma.open(UpperCamelCase , '''wb''' ) as f:
        f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> Any:
    # zip archive containing the text file.
    import zipfile

    lowerCAmelCase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> List[str]:
    # zstandard-compressed text file (optional dependency).
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        lowerCAmelCase__ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
        lowerCAmelCase__ : int = bytes(UpperCamelCase , '''utf-8''' )
        with zstd.open(UpperCamelCase , '''wb''' ) as f:
            f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Dict:
    # Small TMX translation-memory XML file.
    lowerCAmelCase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
    lowerCAmelCase__ : Tuple = textwrap.dedent(
        '''\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>''' )
    with open(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase )
    return filename


# Sample rows/columns shared by the dataset fixtures below
# (originally DATA, DATA2, DATA_DICT_OF_LISTS, DATA_312, DATA_STR).
lowerCAmelCase_ = [
    {"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
    {"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
    {"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
    {"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
lowerCAmelCase_ = [
    {"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
    {"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
lowerCAmelCase_ = {
    """col_1""": ["""0""", """1""", """2""", """3"""],
    """col_2""": [0, 1, 2, 3],
    """col_3""": [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ = [
    {"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
    {"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
lowerCAmelCase_ = [
    {"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
    {"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
    {"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
    {"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( ) -> Tuple:
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Any:
    # Arrow cache file for the column-oriented sample data.
    lowerCAmelCase__ : str = datasets.Dataset.from_dict(UpperCamelCase )
    lowerCAmelCase__ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
    dataset.map(cache_file_name=UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Any:
    # SQLite database with one ``dataset`` table holding the sample rows.
    lowerCAmelCase__ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlitea.connect(UpperCamelCase ) ) as con:
        lowerCAmelCase__ : int = con.cursor()
        cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
        for item in DATA:
            cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
        con.commit()
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> int:
    # CSV file with the sample rows.
    lowerCAmelCase__ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
    with open(UpperCamelCase , '''w''' , newline='''''' ) as f:
        lowerCAmelCase__ : List[str] = csv.DictWriter(UpperCamelCase , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> List[Any]:
    # Second CSV file, used by the multi-file archive fixtures.
    lowerCAmelCase__ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
    with open(UpperCamelCase , '''w''' , newline='''''' ) as f:
        lowerCAmelCase__ : Tuple = csv.DictWriter(UpperCamelCase , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> List[str]:
    # bzip2-compressed copy of the CSV file.
    import bza

    lowerCAmelCase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
    with open(UpperCamelCase , '''rb''' ) as f:
        lowerCAmelCase__ : List[str] = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bza.open(UpperCamelCase , '''wb''' ) as f:
        f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
    # zip archive containing both CSV files.
    lowerCAmelCase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
    # Same archive but with uppercase ``.CSV`` member names.
    lowerCAmelCase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
        f.write(UpperCamelCase , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
    # zip archive with the CSVs nested under a ``main_dir/`` prefix.
    lowerCAmelCase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase ) ) )
        f.write(UpperCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase ) ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Dict:
    # Parquet file with the sample rows.
    lowerCAmelCase__ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
    lowerCAmelCase__ : List[Any] = pa.schema(
        {
            '''col_1''': pa.string(),
            '''col_2''': pa.intaa(),
            '''col_3''': pa.floataa(),
        } )
    with open(UpperCamelCase , '''wb''' ) as f:
        lowerCAmelCase__ : str = pq.ParquetWriter(UpperCamelCase , schema=UpperCamelCase )
        lowerCAmelCase__ : List[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase ) )] for k in DATA[0]} , schema=UpperCamelCase )
        writer.write_table(UpperCamelCase )
        writer.close()
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Dict:
    # JSON file wrapping the row list under a ``data`` key.
    lowerCAmelCase__ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
    lowerCAmelCase__ : List[str] = {'''data''': DATA}
    with open(UpperCamelCase , '''w''' ) as f:
        json.dump(UpperCamelCase , UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Tuple:
    # JSON file wrapping the dict-of-lists under a ``data`` key.
    lowerCAmelCase__ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
    lowerCAmelCase__ : Optional[Any] = {'''data''': DATA_DICT_OF_LISTS}
    with open(UpperCamelCase , '''w''' ) as f:
        json.dump(UpperCamelCase , UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> int:
    # JSON-lines file, one sample row per line.
    lowerCAmelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
    with open(UpperCamelCase , '''w''' ) as f:
        for item in DATA:
            f.write(json.dumps(UpperCamelCase ) + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> str:
    # Second JSON-lines file, used by multi-file archive fixtures.
    lowerCAmelCase__ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
    with open(UpperCamelCase , '''w''' ) as f:
        for item in DATA:
            f.write(json.dumps(UpperCamelCase ) + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Optional[Any]:
    # JSON-lines file for the reordered-keys rows (DATA_312).
    lowerCAmelCase__ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
    with open(UpperCamelCase , '''w''' ) as f:
        for item in DATA_312:
            f.write(json.dumps(UpperCamelCase ) + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> List[Any]:
    # JSON-lines file for the string-valued rows (DATA_STR).
    lowerCAmelCase__ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
    with open(UpperCamelCase , '''w''' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(UpperCamelCase ) + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> int:
    # gzip-compressed copy of the plain-text dataset.
    import gzip

    lowerCAmelCase__ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
    with open(UpperCamelCase , '''rb''' ) as orig_file:
        with gzip.open(UpperCamelCase , '''wb''' ) as zipped_file:
            zipped_file.writelines(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> List[Any]:
    # gzip-compressed copy of the JSON-lines dataset.
    import gzip

    lowerCAmelCase__ : int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
    with open(UpperCamelCase , '''rb''' ) as orig_file:
        with gzip.open(UpperCamelCase , '''wb''' ) as zipped_file:
            zipped_file.writelines(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
    # zip archive containing both JSON-lines files.
    lowerCAmelCase__ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
    # zip archive with the JSON-lines file nested under ``nested/``.
    lowerCAmelCase__ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.join('''nested''' , os.path.basename(UpperCamelCase ) ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
    # zip archive with the JSON-lines files under a ``main_dir/`` prefix.
    lowerCAmelCase__ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase ) ) )
        f.write(UpperCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase ) ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
    # tar archive containing both JSON-lines files.
    lowerCAmelCase__ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
    with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
        f.add(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
        f.add(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]:
    # tar archive with the JSON-lines file nested under ``nested/``.
    lowerCAmelCase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
    with tarfile.TarFile(UpperCamelCase , '''w''' ) as f:
        f.add(UpperCamelCase , arcname=os.path.join('''nested''' , os.path.basename(UpperCamelCase ) ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Optional[Any]:
    # Plain text file with one digit per line.
    lowerCAmelCase__ : Dict = ['''0''', '''1''', '''2''', '''3''']
    lowerCAmelCase__ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
    with open(UpperCamelCase , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Any:
    # Second plain text file.
    lowerCAmelCase__ : int = ['''0''', '''1''', '''2''', '''3''']
    lowerCAmelCase__ : int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
    with open(UpperCamelCase , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Optional[int]:
    # Text file with an unknown ``.abc`` extension.
    lowerCAmelCase__ : Optional[Any] = ['''0''', '''1''', '''2''', '''3''']
    lowerCAmelCase__ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
    with open(UpperCamelCase , '''w''' ) as f:
        for item in data:
            f.write(item + '''\n''' )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
    # zip archive containing both text files.
    lowerCAmelCase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
    # zip archive with the text files under a ``main_dir/`` prefix.
    lowerCAmelCase__ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase ) ) )
        f.write(UpperCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase ) ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
    # zip archive whose members carry an unsupported extension.
    lowerCAmelCase__ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename('''unsupported.ext''' ) )
        f.write(UpperCamelCase , arcname=os.path.basename('''unsupported_2.ext''' ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Dict:
    # Text file containing a U+2029 paragraph separator inside one line.
    lowerCAmelCase__ : List[Any] = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
    lowerCAmelCase__ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
    with open(UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
        f.write(UpperCamelCase )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( ) -> Optional[int]:
    # Path to the checked-in RGB test image.
    return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( ) -> Optional[Any]:
    # Path to the checked-in 44.1 kHz test audio clip.
    return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
    # zip archive with the test image twice (second copy renamed *2.jpg).
    lowerCAmelCase__ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
    with zipfile.ZipFile(UpperCamelCase , '''w''' ) as f:
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ) )
        f.write(UpperCamelCase , arcname=os.path.basename(UpperCamelCase ).replace('''.jpg''' , '''2.jpg''' ) )
    return path


@pytest.fixture(scope='''session''' )
def __lowerCAmelCase( UpperCamelCase ) -> Dict:
    # Data directory with visible and hidden subdirs/files for loader tests.
    lowerCAmelCase__ : Optional[Any] = tmp_path_factory.mktemp('''data_dir''' )

    (data_dir / "subdir").mkdir()
    with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
        f.write('''foo\n''' * 10 )
    with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
        f.write('''bar\n''' * 10 )
    # hidden file
    with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
        f.write('''bar\n''' * 10 )

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
        f.write('''foo\n''' * 10 )
    with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
        f.write('''bar\n''' * 10 )

    return data_dir
470
1
import heapq as hq import math from collections.abc import Iterator class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : Dict ): snake_case__ : Tuple = str(id_ ) snake_case__ : int = None snake_case__ : int = None snake_case__ : Optional[int] = [] snake_case__ : str = {} # {vertex:distance} def __lt__( self : Tuple , __A : Tuple ): return self.key < other.key def __repr__( self : Optional[Any] ): return self.id def _lowercase ( self : Optional[Any] , __A : List[Any] ): self.neighbors.append(__A ) def _lowercase ( self : Union[str, Any] , __A : int , __A : List[Any] ): snake_case__ : Any = weight def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] ): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , snake_case_ ) graph[b - 1].add_edge(graph[a - 1] , snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : Vertex ): snake_case__ : List[Any] = [] for u in graph: snake_case__ : Union[str, Any] = math.inf snake_case__ : Tuple = None snake_case__ : str = 0 snake_case__ : Tuple = graph[:] while q: snake_case__ : str = min(snake_case_ ) q.remove(snake_case_ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): snake_case__ : Tuple = u snake_case__ : str = u.edges[v.id] for i in range(1 , len(snake_case_ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : Vertex ): for u in graph: snake_case__ : Any = math.inf snake_case__ : Optional[Any] = None snake_case__ : Any = 0 snake_case__ : Optional[Any] = list(snake_case_ ) hq.heapify(snake_case_ ) while h: snake_case__ : str = hq.heappop(snake_case_ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): snake_case__ : int = u snake_case__ : str = u.edges[v.id] 
hq.heapify(snake_case_ ) for i in range(1 , len(snake_case_ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def SCREAMING_SNAKE_CASE ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
297
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : str = {"""vocab_file""": """vocab.txt"""} __lowerCamelCase : str = { """vocab_file""": { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""", } } __lowerCamelCase : Dict = { """YituTech/conv-bert-base""": 512, """YituTech/conv-bert-medium-small""": 512, """YituTech/conv-bert-small""": 512, } __lowerCamelCase : List[Any] = { """YituTech/conv-bert-base""": {"""do_lower_case""": True}, """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True}, """YituTech/conv-bert-small""": {"""do_lower_case""": True}, } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_INIT_CONFIGURATION a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ConvBertTokenizer def __init__( self : Tuple , __A : Any=None , __A : Union[str, Any]=None , __A : List[Any]=True , __A : Optional[Any]="[UNK]" , __A : Tuple="[SEP]" , __A : Tuple="[PAD]" , __A : List[str]="[CLS]" , __A : Optional[Any]="[MASK]" , __A : List[str]=True , __A : Optional[Any]=None , **__A : Any , ): super().__init__( __A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , ) snake_case__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __A ) 
!= do_lower_case or normalizer_state.get("strip_accents" , __A ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __A ) != tokenize_chinese_chars ): snake_case__ : Dict = getattr(__A , normalizer_state.pop("type" ) ) snake_case__ : str = do_lower_case snake_case__ : Optional[int] = strip_accents snake_case__ : int = tokenize_chinese_chars snake_case__ : Tuple = normalizer_class(**__A ) snake_case__ : Union[str, Any] = do_lower_case def _lowercase ( self : Any , __A : int , __A : Union[str, Any]=None ): snake_case__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowercase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : str = [self.sep_token_id] snake_case__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): snake_case__ : Tuple = self._tokenizer.model.save(__A , name=__A ) return tuple(__A )
297
1
import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _A : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=36 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1000 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = text_seq_length 
_UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope _UpperCAmelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _UpperCAmelCase = text_seq_length _UpperCAmelCase = (image_size // patch_size) ** 2 + 1 _UpperCAmelCase = self.text_seq_length + self.image_seq_length def UpperCAmelCase ( self ): _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase = bbox[i, j, 3] _UpperCAmelCase = bbox[i, j, 1] _UpperCAmelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase = bbox[i, j, 2] _UpperCAmelCase = bbox[i, j, 0] _UpperCAmelCase = t _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: 
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) _UpperCAmelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = LayoutLMvaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() # text + image _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model( _SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( 
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only _UpperCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = LayoutLMvaForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model( _SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = LayoutLMvaForTokenClassification(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model( _SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) 
model.to(_SCREAMING_SNAKE_CASE ) model.eval() _UpperCAmelCase = model( _SCREAMING_SNAKE_CASE , bbox=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _A ( __lowercase , __lowercase , unittest.TestCase ): __a = False __a = False __a = False __a = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) __a = ( {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel} if is_torch_available() else {} ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def UpperCAmelCase ( self ): _UpperCAmelCase = LayoutLMvaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): _UpperCAmelCase = copy.deepcopy(_SCREAMING_SNAKE_CASE ) if model_class in get_values(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) elif model_class in get_values(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) elif model_class in [ *get_values(_SCREAMING_SNAKE_CASE ), ]: _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) elif model_class in [ *get_values(_SCREAMING_SNAKE_CASE ), ]: _UpperCAmelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE , ) return inputs_dict def UpperCAmelCase ( self ): self.config_tester.run_common_tests() def UpperCAmelCase ( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type 
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase ( self ): for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = LayoutLMvaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( ) -> Dict: _UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class _A ( unittest.TestCase ): @cached_property def UpperCAmelCase ( self ): return LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE ) if is_vision_available() else None @slow def UpperCAmelCase ( self ): _UpperCAmelCase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values.to(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([[1, 2]] ) _UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass _UpperCAmelCase = model( input_ids=input_ids.to(_SCREAMING_SNAKE_CASE ) , bbox=bbox.to(_SCREAMING_SNAKE_CASE ) , pixel_values=pixel_values.to(_SCREAMING_SNAKE_CASE ) , ) # verify the logits _UpperCAmelCase = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor( [[-0.0529, 
0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
175
import logging from transformers.configuration_utils import PretrainedConfig a = logging.getLogger(__name__) class _A ( __lowercase ): __a = """masked_bert""" def __init__( self , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="topK" , _SCREAMING_SNAKE_CASE="constant" , _SCREAMING_SNAKE_CASE=0.0 , **_SCREAMING_SNAKE_CASE , ): super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = pruning_method _UpperCAmelCase = mask_init _UpperCAmelCase = mask_scale
175
1
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets a =datasets.logging.get_logger(__name__) a ='\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' a ='\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' a ='\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. 
Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . 
*)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="dummy_doc" ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ ={doc: key_lines} lowerCamelCase__ ={doc: sys_lines} lowerCamelCase__ ={} lowerCamelCase__ =0 lowerCamelCase__ =0 lowerCamelCase__ =0 lowerCamelCase__ =0 lowerCamelCase__ =0 lowerCamelCase__ =0 lowerCamelCase__ , lowerCamelCase__ =reader.get_doc_mentions(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase ) key_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ =reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ , lowerCamelCase__ =reader.get_doc_mentions(__lowerCAmelCase , sys_doc_lines[doc] , __lowerCAmelCase ) sys_singletons_num += singletons_num if NP_only or min_span: lowerCamelCase__ =reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase ) if remove_nested: lowerCamelCase__ , lowerCamelCase__ =reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters lowerCamelCase__ , lowerCamelCase__ =reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters lowerCamelCase__ =reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ =reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ =(key_clusters, sys_clusters, key_mention_sys_cluster, 
sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( "Number of resulting singleton clusters in the key " F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' "files, respectively" ) return doc_coref_infos def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ =get_coref_infos(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ ={} lowerCamelCase__ =0 lowerCamelCase__ =0 for name, metric in metrics: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =evaluator.evaluate_documents(__lowerCAmelCase , __lowerCAmelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: lowerCamelCase__ =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"conll_score": conll} ) return output_scores def lowerCamelCase_ ( __lowerCAmelCase ) -> List[Any]: '''simple docstring''' lowerCamelCase__ =False for line in key_lines: if not line.startswith("#" ): if len(line.split() ) > 6: lowerCamelCase__ =line.split()[5] if not parse_col == "-": lowerCamelCase__ =True break else: break return has_gold_parse 
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference metric (mentions, MUC, B-cubed, CEAF-e, LEA + CoNLL score).

    Inputs are CoNLL-formatted line sequences; see ``_KWARGS_DESCRIPTION`` for the
    exact calling convention of ``compute``.
    """

    def _info(self):
        # `_info` is the hook name `datasets.Metric` requires; the mangled
        # original called it `_a`, which the framework never invokes.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        """Score `predictions` (system lines) against `references` (gold lines).

        Raises:
            NotImplementedError: if ``min_span`` is requested but the references
                carry no gold parse annotation.
        """
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
530
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class __UpperCAmelCase ( unittest.TestCase ): A__ : List[Any] = StableDiffusionLDMaDPipeline A__ : List[str] = TEXT_TO_IMAGE_PARAMS A__ : str = TEXT_TO_IMAGE_BATCH_PARAMS A__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS def _a ( self ): torch.manual_seed(0 ) lowerCamelCase__ =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) lowerCamelCase__ =DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , ) torch.manual_seed(0 ) lowerCamelCase__ =AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCamelCase__ =CLIPTextModel(_lowerCamelCase ) lowerCamelCase__ =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) lowerCamelCase__ ={ "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, 
"tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _a ( self , _lowerCamelCase , _lowerCamelCase=0 ): if str(_lowerCamelCase ).startswith("mps" ): lowerCamelCase__ =torch.manual_seed(_lowerCamelCase ) else: lowerCamelCase__ =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) lowerCamelCase__ ={ "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _a ( self ): lowerCamelCase__ ="cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ =self.get_dummy_components() lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase ) ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =rgb[0, -3:, -3:, -1] lowerCamelCase__ =depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) lowerCamelCase__ =np.array( [0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] ) lowerCamelCase__ =np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2 def _a ( self ): lowerCamelCase__ =self.get_dummy_components() lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase ) ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase ) lowerCamelCase__ =3 * [inputs["prompt"]] # forward lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase 
) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =rgb_slice_a[0, -3:, -3:, -1] lowerCamelCase__ =depth_slice_a[0, -3:, -1] lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase ) lowerCamelCase__ =3 * [inputs.pop("prompt" )] lowerCamelCase__ =ldmad_pipe.tokenizer( _lowerCamelCase , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowerCamelCase , return_tensors="pt" , ) lowerCamelCase__ =text_inputs["input_ids"].to(_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe.text_encoder(_lowerCamelCase )[0] lowerCamelCase__ =prompt_embeds # forward lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =rgb_slice_a[0, -3:, -3:, -1] lowerCamelCase__ =depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4 def _a ( self ): lowerCamelCase__ ="cpu" # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ =self.get_dummy_components() lowerCamelCase__ =PNDMScheduler(skip_prk_steps=_lowerCamelCase ) lowerCamelCase__ =StableDiffusionLDMaDPipeline(**_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase ) ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowerCamelCase__ =self.get_dummy_inputs(_lowerCamelCase ) lowerCamelCase__ ="french fries" lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase , negative_prompt=_lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =rgb[0, -3:, -3:, -1] lowerCamelCase__ =depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) lowerCamelCase__ =np.array( [0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] ) lowerCamelCase__ =np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 
8_9.9_6_2_1_3_5] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2 @slow @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): def _a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 ): lowerCamelCase__ =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) lowerCamelCase__ =np.random.RandomState(_lowerCamelCase ).standard_normal((1, 4, 64, 64) ) lowerCamelCase__ =torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase , dtype=_lowerCamelCase ) lowerCamelCase__ ={ "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _a ( self ): lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ) lowerCamelCase__ =ldmad_pipe.to(_lowerCamelCase ) ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowerCamelCase__ =self.get_inputs(_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =rgb[0, -3:, -3:, -1].flatten() lowerCamelCase__ =rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) lowerCamelCase__ =np.array( [0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] ) lowerCamelCase__ =np.array( [0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3 assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3 @nightly @require_torch_gpu class 
__UpperCAmelCase ( unittest.TestCase ): def _a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 ): lowerCamelCase__ =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) lowerCamelCase__ =np.random.RandomState(_lowerCamelCase ).standard_normal((1, 4, 64, 64) ) lowerCamelCase__ =torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase , dtype=_lowerCamelCase ) lowerCamelCase__ ={ "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _a ( self ): lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(_lowerCamelCase ) ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowerCamelCase__ =self.get_inputs(_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =0.4_9_5_5_8_6 lowerCamelCase__ =0.3_3_7_9_5_5_1_5 lowerCamelCase__ =1_1_2.4_8_5_1_8 lowerCamelCase__ =9_8.4_8_9_7_4_6 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3 def _a ( self ): lowerCamelCase__ =StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(_lowerCamelCase ) ldmad_pipe.set_progress_bar_config(disable=_lowerCamelCase ) lowerCamelCase__ =self.get_inputs(_lowerCamelCase ) lowerCamelCase__ =ldmad_pipe(**_lowerCamelCase ) lowerCamelCase__ , lowerCamelCase__ =output.rgb, output.depth lowerCamelCase__ =0.4_1_9_4_1_2_7 lowerCamelCase__ =0.3_5_3_7_5_5_8_6 lowerCamelCase__ =0.5_6_3_8_5_0_2 lowerCamelCase__ =0.3_4_6_8_6_1_0_3 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert 
np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3 assert np.abs(expected_depth_std - depth.std() ) < 1E-3
530
1
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    """Exercises the `text-classification` tool, both locally and via the remote endpoint.

    The mangled original inherited from the undefined name `SCREAMING_SNAKE_CASE`
    (should be `ToolTesterMixin`, imported above) and never assigned
    `self.tool` / `self.remote_tool`, so every test raised before asserting.
    """

    def setUp(self):
        # `setUp` (not a mangled name) so unittest actually runs it before each test.
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
249
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    """Configuration for GPT-Neo models.

    Stores the hyper-parameters of the architecture; `attention_types` is a
    compact `[[pattern, repeat], ...]` spec expanded layer-by-layer into
    `attention_layers` (must match `num_layers`).
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Maps the common config attribute names onto GPT-Neo's own names.
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand `[[pattern, repeat], ...]` into a flat per-layer attention-type list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom `torch.Tensor.unfold` implementation to enable ONNX export."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Move the new window dimension to the end, like `Tensor.unfold` does.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Largest divisor of `seq_length` below `window_size`, and the block count.

    ONNX-exportable replacement for the original modulo-based search.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            # Extend the mask over the past positions.
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
249
1