code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def _SCREAMING_SNAKE_CASE ( *snake_case ,**snake_case ): '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class __snake_case ( unittest.TestCase ): _a : str= MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ): '''simple docstring''' lowercase : Optional[Any] = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) lowercase : Any = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' lowercase : str = vqa_pipeline(snake_case ,top_k=1 ) self.assertEqual( snake_case ,[ [{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}], [{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}], ] ,) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = pipeline("""visual-question-answering""" ,model="""hf-internal-testing/tiny-vilt-random-vqa""" ) lowercase : Union[str, Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" lowercase : int = """How many cats are there?""" lowercase : str = vqa_pipeline(image=snake_case ,question="""How many cats are there?""" ,top_k=2 ) self.assertEqual( snake_case ,[{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}, {"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}] ) lowercase : List[str] = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( snake_case ,[{"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}, {"""score""": ANY(snake_case ), """answer""": ANY(snake_case )}] ) @slow @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = pipeline("""visual-question-answering""" ,model="""dandelin/vilt-b32-finetuned-vqa""" ) lowercase : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png""" lowercase : Optional[int] = """How many cats are there?""" lowercase : Any = vqa_pipeline(image=snake_case ,question=snake_case ,top_k=2 ) self.assertEqual( nested_simplify(snake_case ,decimals=4 ) ,[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) lowercase : str = vqa_pipeline({"""image""": image, """question""": question} ,top_k=2 ) self.assertEqual( nested_simplify(snake_case ,decimals=4 ) ,[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) lowercase : Any = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] ,top_k=2 ) self.assertEqual( nested_simplify(snake_case ,decimals=4 ) ,[[{"""score""": 0.8_799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 ,) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass
20
'''simple docstring''' from __future__ import annotations def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : List[str] ): # noqa: E741 while r - l > 1: UpperCAmelCase = (l + r) // 2 if v[m] >= key: UpperCAmelCase = m else: UpperCAmelCase = m # noqa: E741 return r def snake_case_ (_a : list[int] ): if len(_a ) == 0: return 0 UpperCAmelCase = [0] * len(_a ) UpperCAmelCase = 1 UpperCAmelCase = v[0] for i in range(1 , len(_a ) ): if v[i] < tail[0]: UpperCAmelCase = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase = v[i] length += 1 else: UpperCAmelCase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
34
0
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowerCamelCase( _a ): lowercase_ : Dict = ["""image_processor""", """tokenizer"""] lowercase_ : Optional[Any] = """FlavaImageProcessor""" lowercase_ : List[str] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : Dict = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowerCamelCase, ) _lowercase : List[Any] = kwargs.pop('feature_extractor') _lowercase : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(lowerCamelCase, lowerCamelCase) _lowercase : int = self.image_processor def __call__( self, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = True, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = 0, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = True, lowerCamelCase = None, **lowerCamelCase, ) -> Dict: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: _lowercase : Optional[int] = self.tokenizer( text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) if images is not None: _lowercase : str = self.image_processor( lowerCamelCase, return_image_mask=lowerCamelCase, return_codebook_pixels=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, ) if text is not None and images is not None: encoding.update(lowerCamelCase) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase), tensor_type=lowerCamelCase) def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> str: """simple docstring""" return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> List[str]: """simple docstring""" return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase) @property def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Dict = self.tokenizer.model_input_names _lowercase : List[str] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCamelCase, ) return self.image_processor_class @property def UpperCamelCase ( self) -> int: """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCamelCase, ) return self.image_processor
21
'''simple docstring''' def snake_case_ (_a : str , _a : str ): UpperCAmelCase = len(_a ) + 1 UpperCAmelCase = len(_a ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )] # since string of zero length match pattern of zero length UpperCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _a ): UpperCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _a ): UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _a ): for j in range(1 , _a ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": UpperCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: UpperCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): UpperCAmelCase = dp[i - 1][j] else: UpperCAmelCase = 0 else: UpperCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") A ='aab' A ='c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
34
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE :Union[str, Any] = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :str = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Dict = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :List[Any] = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Any = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE :Optional[int] = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A ='pt' elif is_tf_available(): A ='tf' else: A ='jax' class _a ( __a , unittest.TestCase ): __a : Optional[Any] = PerceiverTokenizer __a : str = False def A ( self : Union[str, Any] ): '''simple docstring''' super().setUp() UpperCAmelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : Optional[int] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self : Union[str, Any] , **lowercase : int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ): '''simple docstring''' UpperCAmelCase = [] for i in range(len(lowercase ) ): try: UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) ) UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: UpperCAmelCase = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase = [t[0] for t in toks] # Ensure consistency UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: UpperCAmelCase = ''' ''' + output_txt UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = '''Unicode €.''' UpperCAmelCase = tokenizer(lowercase ) UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' ) UpperCAmelCase = tokenizer('''e è é ê ë''' ) UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) if FRAMEWORK != "jax": UpperCAmelCase = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , lowercase ) self.assertIn('''attention_mask''' , lowercase ) self.assertNotIn('''decoder_input_ids''' , lowercase ) self.assertNotIn('''decoder_attention_mask''' , lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] UpperCAmelCase = tokenizer( text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) UpperCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) UpperCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )] UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def A ( self : Union[str, Any] ): '''simple docstring''' pass def A ( self : Any ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' pass def A ( self : str ): '''simple docstring''' pass def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(lowercase , lowercase )
34
0
'''simple docstring''' def snake_case_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : str ) -> list[int]: UpperCAmelCase : Optional[Any] = int(_lowerCAmelCase ) # Initialize Result UpperCAmelCase : List[Any] = [] # Traverse through all denomination for denomination in reversed(_lowerCAmelCase ): # Find denominations while int(_lowerCAmelCase ) >= int(_lowerCAmelCase ): total_value -= int(_lowerCAmelCase ) answer.append(_lowerCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCamelCase__: int = [] UpperCamelCase__: Optional[Any] = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCamelCase__: int = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F"Denomination {i}: ").strip())) UpperCamelCase__: int = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCamelCase__: Any = [1, 2, 5, 10, 20, 50, 100, 500, 2000] UpperCamelCase__: Any = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F"Following is minimal change for {value}: ") UpperCamelCase__: Union[str, Any] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
23
'''simple docstring''' import os from distutils.util import strtobool def snake_case_ (_a : Union[str, Any] , _a : List[Any] ): for e in env_keys: UpperCAmelCase = int(os.environ.get(_a , -1 ) ) if val >= 0: return val return default def snake_case_ (_a : Dict , _a : Any=False ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return strtobool(_a ) == 1 # As its name indicates `strtobool` actually returns an int... def snake_case_ (_a : str , _a : Optional[Any]="no" ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return value
34
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def a (self : Optional[Any] ): """simple docstring""" __snake_case = 1 __snake_case = 3 __snake_case = (32, 32) __snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ ) return image @property def a (self : List[str] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def a (self : List[str] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def a (self : int ): """simple docstring""" torch.manual_seed(0 ) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) return CLIPTextModel(a__ ) def a (self : Tuple ): """simple docstring""" __snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case = self.dummy_cond_unet_upscale __snake_case = DDPMScheduler() __snake_case = DDIMScheduler(prediction_type='''v_prediction''' ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __snake_case = StableDiffusionUpscalePipeline( unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , ) __snake_case = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A painting of a squirrel eating a burger''' __snake_case = torch.Generator(device=a__ ).manual_seed(0 ) __snake_case = sd_pipe( [prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) __snake_case = output.images __snake_case = torch.Generator(device=a__ ).manual_seed(0 ) __snake_case = sd_pipe( [prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=a__ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] __snake_case = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) __snake_case = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def a (self : str ): """simple docstring""" __snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator __snake_case = self.dummy_cond_unet_upscale __snake_case = DDPMScheduler() __snake_case = DDIMScheduler(prediction_type='''v_prediction''' ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) ) # make sure here that pndm scheduler skips prk __snake_case = StableDiffusionUpscalePipeline( unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , ) __snake_case = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A painting of a squirrel eating a burger''' __snake_case = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) __snake_case = output.images assert image.shape[0] == 2 __snake_case = torch.Generator(device=a__ ).manual_seed(0 ) __snake_case = sd_pipe( [prompt] , image=a__ , generator=a__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , ) __snake_case = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def a (self : List[Any] ): """simple docstring""" __snake_case = self.dummy_cond_unet_upscale __snake_case = DDPMScheduler() __snake_case = DDIMScheduler(prediction_type='''v_prediction''' ) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __snake_case = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 __snake_case = unet.half() __snake_case = text_encoder.half() # make sure here that pndm scheduler skips prk __snake_case = StableDiffusionUpscalePipeline( unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=350 , ) __snake_case = sd_pipe.to(a__ ) sd_pipe.set_progress_bar_config(disable=a__ ) __snake_case = '''A painting of a squirrel eating a burger''' __snake_case = torch.manual_seed(0 ) __snake_case = sd_pipe( [prompt] , image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , ).images __snake_case = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a (self : List[Any] ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) __snake_case = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat.npy''' ) __snake_case = '''stabilityai/stable-diffusion-x4-upscaler''' __snake_case = StableDiffusionUpscalePipeline.from_pretrained(a__ ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() __snake_case = '''a cat sitting on a park bench''' __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a__ , image=a__ , generator=a__ , output_type='''np''' , ) __snake_case = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def a (self : int ): """simple docstring""" __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) __snake_case = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale''' '''/upsampled_cat_fp16.npy''' ) __snake_case = '''stabilityai/stable-diffusion-x4-upscaler''' __snake_case = StableDiffusionUpscalePipeline.from_pretrained( a__ , torch_dtype=torch.floataa , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing() __snake_case = '''a cat sitting on a park bench''' __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a__ , image=a__ , generator=a__ , output_type='''np''' , ) __snake_case = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a (self : str ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-upscale/low_res_cat.png''' ) __snake_case = '''stabilityai/stable-diffusion-x4-upscaler''' __snake_case = StableDiffusionUpscalePipeline.from_pretrained( a__ , torch_dtype=torch.floataa , ) pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __snake_case = '''a cat sitting on a park bench''' __snake_case = torch.manual_seed(0 ) __snake_case = pipe( prompt=a__ , image=a__ , generator=a__ , num_inference_steps=5 , output_type='''np''' , ) __snake_case = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
24
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A =logging.getLogger(__name__) A ='Hello world! cécé herlolip' A =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def snake_case_ (_a : List[Any] , _a : Any ): UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) UpperCAmelCase = torch.load(_a , lambda _a , _a : storage ) UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a ) original.eval() UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase = encoder_input_ids UpperCAmelCase = decoder_input_ids UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0] UpperCAmelCase = original.generator(_a ) UpperCAmelCase = new_model( _a , _a , _a , _a , _a )[0] UpperCAmelCase = new_model.generator(_a ) UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) A =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
34
0
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ (a__ , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[str] = CodeGenTokenizer __UpperCamelCase : int = CodeGenTokenizerFast __UpperCamelCase : List[str] = True __UpperCamelCase : Optional[Any] = {'''add_prefix_space''': True} __UpperCamelCase : str = False def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : List[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", """<|endoftext|>""", ] SCREAMING_SNAKE_CASE__ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) SCREAMING_SNAKE_CASE__ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] SCREAMING_SNAKE_CASE__ : List[str] = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE__ ) ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = """lower newer""" SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer""" return input_text, output_text def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : int = """lower newer""" SCREAMING_SNAKE_CASE__ : Dict = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = """lower newer""" # Testing tokenization SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE__ : int = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : str = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict: """simple docstring""" pass def __magic_name__ (self , SCREAMING_SNAKE_CASE__=15 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Simple input SCREAMING_SNAKE_CASE__ : Any = """This is a simple input""" SCREAMING_SNAKE_CASE__ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ : Dict = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ : Optional[int] = [ ("""This is a simple input 1""", """This is a simple input 2"""), ("""This is a simple pair 1""", """This is a simple pair 2"""), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding="""max_length""" , ) def __magic_name__ (self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input SCREAMING_SNAKE_CASE__ : str = """This is a simple input""" SCREAMING_SNAKE_CASE__ : int = ["""This is a simple input looooooooong""", """This is a simple input"""] SCREAMING_SNAKE_CASE__ : str = ("""This is a simple input""", """This is a pair""") SCREAMING_SNAKE_CASE__ : List[Any] = [ ("""This is a simple input loooooong""", """This is a simple input"""), ("""This is a simple pair loooooong""", """This is a simple pair"""), ] SCREAMING_SNAKE_CASE__ : Dict = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer(*SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" ) # s # test single string max_length padding self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = """$$$""" SCREAMING_SNAKE_CASE__ : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = """This is a simple input""" SCREAMING_SNAKE_CASE__ : List[str] = ["""This is a simple input 1""", """This is a simple input 2"""] SCREAMING_SNAKE_CASE__ : Dict = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) SCREAMING_SNAKE_CASE__ : str = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#""" SCREAMING_SNAKE_CASE__ : int = """\nif len_a > len_b: result = a\nelse: result = b""" SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""] SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(SCREAMING_SNAKE_CASE__ , truncate_before_pattern=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Dict: """simple docstring""" pass
25
'''simple docstring''' from ..utils import DummyObject, requires_backends class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : Any = ["""flax""", """transformers"""] def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] )
34
0
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: # A mock response for an HTTP head request to emulate server down _A : Optional[int] = mock.Mock() _A : Optional[Any] = 500 _A : Dict = {} _A : Union[str, Any] = HTTPError _A : List[Any] = {} # Download this model to make sure it's in the cache. _A : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head: _A : List[Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def a__ ( self ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down _A : str = mock.Mock() _A : Any = 500 _A : Optional[int] = {} _A : List[str] = HTTPError _A : int = {} # Download this model to make sure it's in the cache. _A : int = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=_a ) as mock_head: _A : str = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def a__ ( self ) -> Optional[int]: # This test is for deprecated behavior and can be removed in v5 try: _A : Tuple = tempfile.mktemp() with open(_a , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , _a ) _A : str = AlbertTokenizer.from_pretrained(_a ) finally: os.remove(_a ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , _a ) _A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("""tokenizer.json""" ) def a__ ( self ) -> Union[str, Any]: # This test is for deprecated behavior and can be removed in v5 _A : Dict = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class lowercase ( unittest.TestCase ): _a = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def a__ ( cls ) -> Tuple: _A : int = TOKEN HfFolder.save_token(_a ) @classmethod def a__ ( cls ) -> List[Any]: try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def a__ ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[Any] = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : Optional[Any] = BertTokenizer(_a ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) _A : List[Any] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(_a , repo_id="""test-tokenizer""" , push_to_hub=_a , use_auth_token=self._token ) _A : List[str] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def a__ ( self ) -> Optional[int]: with tempfile.TemporaryDirectory() as tmp_dir: _A : Optional[int] = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : Tuple = BertTokenizer(_a ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) _A : List[Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( _a , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=_a , use_auth_token=self._token ) _A : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def a__ ( self ) -> str: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _A : Union[str, Any] = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : str = CustomTokenizer(_a ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) _A : Optional[int] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _A : Any = os.path.join(_a , """vocab.txt""" ) with open(_a , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) _A : Dict = BertTokenizerFast.from_pretrained(_a ) bert_tokenizer.save_pretrained(_a ) _A : Dict = CustomTokenizerFast.from_pretrained(_a ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) _A : Tuple = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_a ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) _A : Tuple = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=_a , trust_remote_code=_a ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class lowercase ( unittest.TestCase ): def a__ ( self ) -> List[Any]: _A : Optional[Any] = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def a__ ( self ) -> Union[str, Any]: _A : List[Any] = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] ) def a__ ( self ) -> Dict: _A : List[Any] = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def a__ ( self ) -> Dict: _A : Dict = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def a__ ( self ) -> List[Any]: _A : Optional[Any] = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def a__ ( self ) -> List[Any]: _A : Dict = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def a__ ( self ) -> int: _A : Any = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] ) def a__ ( self ) -> List[Any]: # Even if the offsets are wrong, we necessarily output correct string # parts. _A : Tuple = Trie() _A : int = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(_a , ["""AB""", """C"""] )
26
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) A ={ 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } A ={ 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def snake_case_ (_a : List[str] ): UpperCAmelCase = EfficientNetConfig() UpperCAmelCase = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCAmelCase = CONFIG_MAP[model_name]['''width_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''depth_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dw_padding'''] UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = 1_0_0_0 UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(_a , stream=_a ).raw ) return im def snake_case_ (_a : str ): UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_a , ) return preprocessor def snake_case_ (_a : Optional[Any] ): UpperCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase = sorted(set(_a ) ) UpperCAmelCase = len(_a ) UpperCAmelCase = {b: str(_a ) for b, i in zip(_a , range(_a ) )} UpperCAmelCase = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase = '''efficientnet.''' + item[1] UpperCAmelCase = '''classifier.weight''' UpperCAmelCase = '''classifier.bias''' return key_mapping def snake_case_ (_a : Dict , _a : List[str] , _a : Dict ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase = torch.from_numpy(np.transpose(_a ) ) else: UpperCAmelCase = torch.from_numpy(_a ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_a ) @torch.no_grad() def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ): UpperCAmelCase = model_classes[model_name]( include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , ) UpperCAmelCase = original_model.trainable_variables UpperCAmelCase = original_model.non_trainable_variables UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase = param.numpy() UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase = get_efficientnet_config(_a ) UpperCAmelCase = EfficientNetForImageClassification(_a ).eval() UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase = rename_keys(_a ) replace_params(_a , _a , _a ) # Initialize preprocessor and preprocess input image UpperCAmelCase = convert_image_processor(_a ) UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase = hf_model(**_a ) UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase = False UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase = image.img_to_array(_a ) UpperCAmelCase = np.expand_dims(_a , axis=0 ) UpperCAmelCase = original_model.predict(_a ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_a ): os.mkdir(_a ) # Save converted model and image processor hf_model.save_pretrained(_a ) preprocessor.save_pretrained(_a ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." ) UpperCAmelCase = F"efficientnet-{model_name}" preprocessor.push_to_hub(_a ) hf_model.push_to_hub(_a ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') A =parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
34
0
'''simple docstring''' from importlib import import_module from .logging import get_logger __lowercase : Tuple = get_logger(__name__) class __UpperCamelCase : def __init__( self , __a , __a=None ): '''simple docstring''' __a : List[Any] = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , __a , getattr(__a , __a ) ) __a : Dict = module._original_module if isinstance(__a , _PatchedModuleObj ) else module class __UpperCamelCase : A_ = [] def __init__( self , __a , __a , __a , __a=None ): '''simple docstring''' __a : int = obj __a : Union[str, Any] = target __a : int = new __a : Dict = target.split('.' )[0] __a : Any = {} __a : Any = attrs or [] def __enter__( self ): '''simple docstring''' *__a , __a : Dict = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a ) ): try: __a : str = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __a : Optional[int] = getattr(self.obj , __a ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __a : List[str] = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs ) ) __a : List[str] = getattr(self.obj , __a ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a ) , attrs=self.attrs ) ) __a : str = getattr(__a , __a ) # finally set the target attribute setattr(__a , __a , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __a : Any = getattr(import_module('.'.join(__a ) ) , __a ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a ) is attr_value: __a : List[Any] = getattr(self.obj , __a ) setattr(self.obj , __a , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __a : str = globals()['__builtins__'][target_attr] setattr(self.obj , __a , self.new ) else: raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self , *__a ): '''simple docstring''' for attr in list(self.original ): setattr(self.obj , __a , self.original.pop(__a ) ) def __UpperCAmelCase ( self ): '''simple docstring''' self.__enter__() self._active_patches.append(self ) def __UpperCAmelCase ( self ): '''simple docstring''' try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
27
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A =input('Enter image url: ').strip() print(f"""Downloading image from {url} ...""") A =BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image A =soup.find('meta', {'property': 'og:image'})['content'] A =requests.get(image_url).content A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(f"""Done. Image saved to disk as {file_name}.""")
34
0
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = logging.get_logger("transformers.models.speecht5") def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple: """simple docstring""" hf_model.apply_weight_norm() UpperCamelCase = checkpoint['input_conv.weight_g'] UpperCamelCase = checkpoint['input_conv.weight_v'] UpperCamelCase = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): UpperCamelCase = checkpoint[F"""upsamples.{i}.1.weight_g"""] UpperCamelCase = checkpoint[F"""upsamples.{i}.1.weight_v"""] UpperCamelCase = checkpoint[F"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): UpperCamelCase = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""] UpperCamelCase = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""] UpperCamelCase = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""] UpperCamelCase = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""] UpperCamelCase = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""] UpperCamelCase = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""] UpperCamelCase = checkpoint['output_conv.1.weight_g'] UpperCamelCase = checkpoint['output_conv.1.weight_v'] UpperCamelCase = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__ , A__=None , A__=None , ) -> int: """simple docstring""" if config_path is not None: UpperCamelCase = SpeechTaHifiGanConfig.from_pretrained(A__ ) else: UpperCamelCase = SpeechTaHifiGanConfig() UpperCamelCase = SpeechTaHifiGan(A__ ) UpperCamelCase = torch.load(A__ ) load_weights(orig_checkpoint['model']['generator'] , A__ , A__ ) UpperCamelCase = np.load(A__ ) UpperCamelCase = stats[0].reshape(-1 ) UpperCamelCase = stats[1].reshape(-1 ) UpperCamelCase = torch.from_numpy(A__ ).float() UpperCamelCase = torch.from_numpy(A__ ).float() model.save_pretrained(A__ ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(A__ ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) _lowerCamelCase : Union[str, Any] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
28
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _a ( __a ): __a : str = ["""vqvae"""] def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ): '''simple docstring''' super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' return 50 if isinstance(self.scheduler , lowercase ) else 1_000 @torch.no_grad() def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ): '''simple docstring''' UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase , device=self.device , ) UpperCAmelCase = noise UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase , lowercase ) UpperCAmelCase = self.mel.audio_slice_to_image(lowercase ) UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase = (input_image / 255) * 2 - 1 UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample( generator=lowercase )[0] UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase = int(mask_start_secs * pixels_per_second ) UpperCAmelCase = int(mask_end_secs * pixels_per_second ) UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase ): UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample'''] else: UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] if isinstance(self.scheduler , lowercase ): UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample'''] else: UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase = self.vqvae.decode(lowercase )['''sample'''] UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase = (images * 255).round().astype('''uint8''' ) UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) ) @torch.no_grad() def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler , lowercase ) self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase = (sample / 255) * 2 - 1 UpperCAmelCase = torch.Tensor(lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase = self.scheduler.alphas_cumprod[t] UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase = 1 - alpha_prod_t UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ): '''simple docstring''' UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
34
0
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : str = BertJapaneseTokenizer _snake_case : Dict = False _snake_case : int = True def __UpperCAmelCase ( self ) -> str: super().setUp() UpperCAmelCase_ : str = [ '[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは', '世界', '##世界', '、', '##、', '。', '##。', ] UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict: UpperCAmelCase_ : List[Any] = 'こんにちは、世界。 \nこんばんは、世界。' UpperCAmelCase_ : List[Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。' return input_text, output_text def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.get_input_output_texts(_UpperCamelCase ) UpperCAmelCase_ : int = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Tuple = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return text, ids def __UpperCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> Optional[Any]: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> Optional[Any]: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Dict = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' ) self.assertIsNotNone(_UpperCamelCase ) UpperCAmelCase_ : List[str] = 'こんにちは、世界。\nこんばんは、世界。' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(_UpperCamelCase , 'wb' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , 'rb' ) as handle: UpperCAmelCase_ : str = pickle.load(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = MecabTokenizer(mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> Union[str, Any]: try: UpperCAmelCase_ : List[str] = MecabTokenizer(mecab_dic='unidic_lite' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> Optional[Any]: try: UpperCAmelCase_ : Optional[int] = MecabTokenizer(mecab_dic='unidic' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> Dict: UpperCAmelCase_ : Dict = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def __UpperCAmelCase ( self ) -> str: try: UpperCAmelCase_ : int = MecabTokenizer( do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Any = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , ) @require_sudachi def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' ) self.assertIsNotNone(_UpperCamelCase ) UpperCAmelCase_ : Any = 'こんにちは、世界。\nこんばんは、世界。' UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(_UpperCamelCase , 'wb' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , 'rb' ) as handle: UpperCAmelCase_ : Tuple = pickle.load(_UpperCamelCase ) UpperCAmelCase_ : Optional[int] = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_sudachi def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Tuple = SudachiTokenizer(sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] ) @require_sudachi def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] ) @require_sudachi def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] ) @require_sudachi def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Tuple = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : List[str] = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , ) @require_sudachi def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : List[Any] = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> Any: UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' ) self.assertIsNotNone(_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。' UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) UpperCAmelCase_ : int = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(_UpperCamelCase , 'wb' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , 'rb' ) as handle: UpperCAmelCase_ : str = pickle.load(_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_jumanpp def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : int = JumanppTokenizer(do_lower_case=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = JumanppTokenizer(normalize_text=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : List[Any] = JumanppTokenizer(trim_whitespace=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , ) @require_jumanpp def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Dict = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , ) def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは'] UpperCAmelCase_ : Any = {} for i, token in enumerate(_UpperCamelCase ): UpperCAmelCase_ : Union[str, Any] = i UpperCAmelCase_ : List[Any] = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] ) def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Union[str, Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' ) UpperCAmelCase_ : str = tokenizer.subword_tokenizer UpperCAmelCase_ : Union[str, Any] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' ) self.assertListEqual(_UpperCamelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] ) UpperCAmelCase_ : Any = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' ) self.assertListEqual(_UpperCamelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' ) UpperCAmelCase_ : Optional[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : str = tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCamelCase (_snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = BertJapaneseTokenizer _snake_case : Any = False def __UpperCAmelCase ( self ) -> Tuple: super().setUp() UpperCAmelCase_ : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Optional[int]: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、世界。' UpperCAmelCase_ : Optional[int] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。' return input_text, output_text def __UpperCAmelCase ( self ) -> Dict: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> List[str]: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> str: pass # TODO add if relevant def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' ) UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' ) self.assertListEqual( _UpperCamelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] UpperCAmelCase_ : Dict = {} for i, token in enumerate(_UpperCamelCase ): UpperCAmelCase_ : Any = i UpperCAmelCase_ : int = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] ) self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] ) def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' ) UpperCAmelCase_ : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCamelCase ) UpperCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : List[Any] = 'cl-tohoku/bert-base-japanese' UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = 'cl-tohoku/bert-base-japanese' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) ) UpperCAmelCase_ : List[Any] = 'bert-base-cased' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertJapaneseTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) )
29
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal A =logging.get_logger(__name__) A =TypeVar('DatasetType', Dataset, IterableDataset) def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) else: return _interleave_iterable_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a ) else: return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
34
0
def a ( snake_case__: int = 4_000_000 ): '''simple docstring''' lowercase_ = [] lowercase_ , lowercase_ = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(snake_case__ ) lowercase_ , lowercase_ = b, a + b return sum(snake_case__ ) if __name__ == "__main__": print(f"{solution() = }")
30
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def snake_case_ (_a : Tuple ): return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def snake_case_ (): UpperCAmelCase = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a ) UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_a ) EnvironmentCommand.register_subcommand(_a ) TestCommand.register_subcommand(_a ) RunBeamCommand.register_subcommand(_a ) DummyDataCommand.register_subcommand(_a ) # Parse args UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if not hasattr(_a , '''func''' ): parser.print_help() exit(1 ) UpperCAmelCase = parse_unknown_args(_a ) # Run UpperCAmelCase = args.func(_a , **_a ) service.run() if __name__ == "__main__": main()
34
0
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __SCREAMING_SNAKE_CASE : Optional[List[str]] = None __SCREAMING_SNAKE_CASE : Any = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __SCREAMING_SNAKE_CASE : Dict = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class lowerCamelCase_ : '''simple docstring''' __UpperCamelCase: bool = True __UpperCamelCase: Optional[str] = None # Automatically constructed __UpperCamelCase: ClassVar[str] = "PIL.Image.Image" __UpperCamelCase: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} ) __UpperCamelCase: str = field(default="Image" , init=snake_case__ , repr=snake_case__ ) def __call__( self : Any ): return self.pa_type def _A ( self : Tuple , A : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if isinstance(A , A ): _UpperCAmelCase : Optional[int] = np.array(A ) if isinstance(A , A ): return {"path": value, "bytes": None} elif isinstance(A , A ): return {"path": None, "bytes": value} elif isinstance(A , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(A ) elif isinstance(A , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(A ) elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def _A ( self : List[str] , A : dict , A : Optional[Any]=None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support decoding images, please install 'Pillow'." ) if token_per_repo_id is None: _UpperCAmelCase : List[Any] = {} _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(A ): _UpperCAmelCase : List[str] = PIL.Image.open(A ) else: _UpperCAmelCase : int = path.split("::" )[-1] try: _UpperCAmelCase : Optional[Any] = string_to_dict(A , config.HUB_DATASETS_URL )["repo_id"] _UpperCAmelCase : int = token_per_repo_id.get(A ) except ValueError: _UpperCAmelCase : Optional[Any] = None with xopen(A , "rb" , use_auth_token=A ) as f: _UpperCAmelCase : str = BytesIO(f.read() ) _UpperCAmelCase : List[str] = PIL.Image.open(bytes_ ) else: _UpperCAmelCase : Any = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def _A ( self : Optional[Any] ): from .features import Value return ( self if self.decode else { "bytes": Value("binary" ), "path": Value("string" ), } ) def _A ( self : Optional[int] , A : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): _UpperCAmelCase : Any = pa.array([None] * len(A ) , type=pa.binary() ) _UpperCAmelCase : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _UpperCAmelCase : str = pa.array([None] * len(A ) , type=pa.string() ) _UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: _UpperCAmelCase : Tuple = storage.field("bytes" ) else: _UpperCAmelCase : str = pa.array([None] * len(A ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: _UpperCAmelCase : Tuple = storage.field("path" ) else: _UpperCAmelCase : Optional[Any] = pa.array([None] * len(A ) , type=pa.string() ) _UpperCAmelCase : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _UpperCAmelCase : List[Any] = pa.array( [encode_np_array(np.array(A ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) _UpperCAmelCase : Union[str, Any] = pa.array([None] * len(A ) , type=pa.string() ) _UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays( [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(A , self.pa_type ) def _A ( self : List[str] , A : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(A : Dict ): with xopen(A , "rb" ) as f: _UpperCAmelCase : List[str] = f.read() return bytes_ _UpperCAmelCase : str = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _UpperCAmelCase : int = pa.array( [os.path.basename(A ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) _UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(A , self.pa_type ) def UpperCamelCase_ ( ) -> List[str]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _UpperCAmelCase : Any = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def UpperCamelCase_ ( _UpperCAmelCase : "PIL.Image.Image" ) -> bytes: """simple docstring""" _UpperCAmelCase : List[str] = BytesIO() if image.format in list_image_compression_formats(): _UpperCAmelCase : Tuple = image.format else: _UpperCAmelCase : Optional[Any] = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(_UpperCAmelCase , format=_UpperCAmelCase ) return buffer.getvalue() def UpperCamelCase_ ( _UpperCAmelCase : "PIL.Image.Image" ) -> dict: """simple docstring""" if hasattr(_UpperCAmelCase , "filename" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )} def UpperCamelCase_ ( _UpperCAmelCase : np.ndarray ) -> dict: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) _UpperCAmelCase : Union[str, Any] = array.dtype _UpperCAmelCase : Tuple = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER _UpperCAmelCase : List[str] = dtype.kind _UpperCAmelCase : Dict = dtype.itemsize _UpperCAmelCase : Any = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _UpperCAmelCase : Dict = np.dtype("|u1" ) if dtype_kind not in ["u", "i"]: raise TypeError( F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _UpperCAmelCase : Optional[Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _UpperCAmelCase : Union[str, Any] = dtype_byteorder + dtype_kind + str(_UpperCAmelCase ) _UpperCAmelCase : List[Any] = np.dtype(_UpperCAmelCase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) _UpperCAmelCase : Optional[Any] = PIL.Image.fromarray(array.astype(_UpperCAmelCase ) ) return {"path": None, "bytes": image_to_bytes(_UpperCAmelCase )} def UpperCamelCase_ ( _UpperCAmelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'." ) if objs: _UpperCAmelCase , _UpperCAmelCase : Any = first_non_null_value(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(_UpperCAmelCase , np.ndarray ): _UpperCAmelCase : Dict = no_op_if_value_is_null(_UpperCAmelCase ) return [obj_to_image_dict_func(_UpperCAmelCase ) for obj in objs] elif isinstance(_UpperCAmelCase , PIL.Image.Image ): _UpperCAmelCase : Optional[int] = no_op_if_value_is_null(_UpperCAmelCase ) return [obj_to_image_dict_func(_UpperCAmelCase ) for obj in objs] else: return objs else: return objs
31
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow A =[ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) A =logging.getLogger() def snake_case_ (): UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase = parser.parse_args() return args.f def snake_case_ (_a : List[str] , _a : Union[str, Any]="eval" ): UpperCAmelCase = os.path.join(_a , F"{split}_results.json" ) if os.path.exists(_a ): with open(_a , '''r''' ) as f: return json.load(_a ) raise ValueError(F"can't find {path}" ) A =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( __a ): def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_glue.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_clm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_summarization_flax.main() UpperCAmelCase = get_results(lowercase , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_ta_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_ner.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_qa.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
34
0
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('socket.socket' ) @patch('builtins.open' ) def SCREAMING_SNAKE_CASE_ ( __A : int , __A : Any ) -> Optional[int]: """simple docstring""" a_ : Any = Mock() a_ : Dict = conn, Mock() a_ : Optional[int] = iter([1, None] ) a_ : List[str] = lambda __A : next(__A ) # ===== invoke ===== send_file(filename='mytext.txt' , testing=__A ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
32
'''simple docstring''' class _a : def __init__( self : Any ): '''simple docstring''' UpperCAmelCase = {} # Mapping from char to TrieNode UpperCAmelCase = False def A ( self : int , lowercase : list[str] ): '''simple docstring''' for word in words: self.insert(lowercase ) def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: UpperCAmelCase = TrieNode() UpperCAmelCase = curr.nodes[char] UpperCAmelCase = True def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: return False UpperCAmelCase = curr.nodes[char] return curr.is_leaf def A ( self : str , lowercase : str ): '''simple docstring''' def _delete(lowercase : TrieNode , lowercase : str , lowercase : int ) -> bool: if index == len(lowercase ): # If word does not exist if not curr.is_leaf: return False UpperCAmelCase = False return len(curr.nodes ) == 0 UpperCAmelCase = word[index] UpperCAmelCase = curr.nodes.get(lowercase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCAmelCase = _delete(lowercase , lowercase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowercase , 0 ) def snake_case_ (_a : TrieNode , _a : str ): if node.is_leaf: print(_a , end=''' ''' ) for key, value in node.nodes.items(): print_words(_a , word + key ) def snake_case_ (): UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split() UpperCAmelCase = TrieNode() root.insert_many(_a ) # print_words(root, "") assert all(root.find(_a ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def snake_case_ (_a : str , _a : bool ): print(str(_a ) , '''works!''' if passes else '''doesn\'t work :(''' ) def snake_case_ (): assert test_trie() def snake_case_ (): print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
34
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class _UpperCAmelCase : def __init__( self : Any , A : int ) -> None: lowercase_ : List[str] = value lowercase_ : Node | None = None lowercase_ : Node | None = None class _UpperCAmelCase : def __init__( self : Optional[int] , A : Node ) -> None: lowercase_ : Optional[Any] = tree def A ( self : Any , A : Node | None ) -> int: if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : int ) -> Iterator[int]: yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
33
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) A ={ 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _a ( __a ): __a : Union[str, Any] = """encodec""" def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = target_bandwidths UpperCAmelCase = sampling_rate UpperCAmelCase = audio_channels UpperCAmelCase = normalize UpperCAmelCase = chunk_length_s UpperCAmelCase = overlap UpperCAmelCase = hidden_size UpperCAmelCase = num_filters UpperCAmelCase = num_residual_layers UpperCAmelCase = upsampling_ratios UpperCAmelCase = norm_type UpperCAmelCase = kernel_size UpperCAmelCase = last_kernel_size UpperCAmelCase = residual_kernel_size UpperCAmelCase = dilation_growth_rate UpperCAmelCase = use_causal_conv UpperCAmelCase = pad_mode UpperCAmelCase = compress UpperCAmelCase = num_lstm_layers UpperCAmelCase = trim_right_ratio UpperCAmelCase = codebook_size UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size UpperCAmelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**lowercase ) @property def A ( self : Dict ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def A ( self : Union[str, Any] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def A ( self : Optional[int] ): '''simple docstring''' return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
34
0
'''simple docstring''' import argparse import os import re __a = "src/transformers" # Pattern that looks at the indentation in a line. __a = re.compile(R"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. __a = re.compile(R"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. __a = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. __a = re.compile(R"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. __a = re.compile(R"\[([^\]]+)\]") def __snake_case( _lowerCAmelCase ) -> List[Any]: snake_case__ : int = _re_indent.search(_lowerCAmelCase ) return "" if search is None else search.groups()[0] def __snake_case( _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]: snake_case__ : str = 0 snake_case__ : Union[str, Any] = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(_lowerCAmelCase ): index += 1 snake_case__ : Tuple = ["""\n""".join(lines[:index] )] else: snake_case__ : List[str] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). snake_case__ : Optional[int] = [lines[index]] index += 1 while index < len(_lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(_lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(_lowerCAmelCase ) ) if index < len(_lowerCAmelCase ) - 1: snake_case__ : str = [lines[index + 1]] index += 1 else: snake_case__ : int = [] else: blocks.append("""\n""".join(_lowerCAmelCase ) ) snake_case__ : Optional[Any] = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(_lowerCAmelCase ) > 0: blocks.append("""\n""".join(_lowerCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_lowerCAmelCase ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def __snake_case( _lowerCAmelCase ) -> Tuple: def _inner(_lowerCAmelCase ): return key(_lowerCAmelCase ).lower().replace("""_""" , """""" ) return _inner def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None ) -> List[Any]: # If no key is provided, we use a noop. def noop(_lowerCAmelCase ): return x if key is None: snake_case__ : Optional[int] = noop # Constants are all uppercase, they go first. snake_case__ : Optional[int] = [obj for obj in objects if key(_lowerCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. snake_case__ : int = [obj for obj in objects if key(_lowerCAmelCase )[0].isupper() and not key(_lowerCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. snake_case__ : str = [obj for obj in objects if not key(_lowerCAmelCase )[0].isupper()] snake_case__ : List[str] = ignore_underscore(_lowerCAmelCase ) return sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) + sorted(_lowerCAmelCase , key=_lowerCAmelCase ) def __snake_case( _lowerCAmelCase ) -> int: # This inner function sort imports between [ ]. def _replace(_lowerCAmelCase ): snake_case__ : Union[str, Any] = match.groups()[0] if "," not in imports: return f"[{imports}]" snake_case__ : int = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: snake_case__ : List[str] = keys[:-1] return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] ) + "]" snake_case__ : str = import_statement.split("""\n""" ) if len(_lowerCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. snake_case__ : Dict = 2 if lines[1].strip() == """[""" else 1 snake_case__ : str = [(i, _re_strip_line.search(_lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] snake_case__ : str = sort_objects(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] ) snake_case__ : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(_lowerCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: snake_case__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] ) else: snake_case__ : List[Any] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: snake_case__ : List[str] = keys[:-1] snake_case__ : int = get_indent(lines[1] ) + """, """.join([f"\"{k}\"" for k in sort_objects(_lowerCAmelCase )] ) return "\n".join(_lowerCAmelCase ) else: # Finally we have to deal with imports fitting on one line snake_case__ : Optional[Any] = _re_bracket_content.sub(_replace , _lowerCAmelCase ) return import_statement def __snake_case( _lowerCAmelCase , _lowerCAmelCase=True ) -> Dict: with open(_lowerCAmelCase , encoding="""utf-8""" ) as f: snake_case__ : Optional[int] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 snake_case__ : Optional[int] = split_code_in_indented_blocks( _lowerCAmelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_lowerCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. snake_case__ : Optional[Any] = main_blocks[block_idx] snake_case__ : Dict = block.split("""\n""" ) # Get to the start of the imports. snake_case__ : Dict = 0 while line_idx < len(_lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: snake_case__ : Union[str, Any] = len(_lowerCAmelCase ) else: line_idx += 1 if line_idx >= len(_lowerCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. snake_case__ : List[str] = """\n""".join(block_lines[line_idx:-1] ) snake_case__ : str = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. snake_case__ : Optional[int] = split_code_in_indented_blocks(_lowerCAmelCase , indent_level=_lowerCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend snake_case__ : Tuple = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. snake_case__ : Optional[Any] = [(pattern.search(_lowerCAmelCase ).groups()[0] if pattern.search(_lowerCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. snake_case__ : Dict = [(i, key) for i, key in enumerate(_lowerCAmelCase ) if key is not None] snake_case__ : Union[str, Any] = [x[0] for x in sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. snake_case__ : List[Any] = 0 snake_case__ : Optional[Any] = [] for i in range(len(_lowerCAmelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: snake_case__ : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(_lowerCAmelCase ) count += 1 # And we put our main block back together with its first and last line. snake_case__ : Dict = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(_lowerCAmelCase ): if check_only: return True else: print(f"Overwriting {file}." ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write("""\n""".join(_lowerCAmelCase ) ) def __snake_case( _lowerCAmelCase=True ) -> Tuple: snake_case__ : str = [] for root, _, files in os.walk(_lowerCAmelCase ): if "__init__.py" in files: snake_case__ : Union[str, Any] = sort_imports(os.path.join(_lowerCAmelCase , """__init__.py""" ) , check_only=_lowerCAmelCase ) if result: snake_case__ : Union[str, Any] = [os.path.join(_lowerCAmelCase , """__init__.py""" )] if len(_lowerCAmelCase ) > 0: raise ValueError(f"Would overwrite {len(_lowerCAmelCase )} files, run `make style`." ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") __a = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
35
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch A =logging.get_logger(__name__) class _a ( __a ): __a : str = ["""pixel_values"""] def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' super().__init__(**lowercase ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" ) return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase ) def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ): '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ): '''simple docstring''' UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(lowercase ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images] UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=lowercase , tensor_type=lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ): '''simple docstring''' UpperCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase ) != len(lowercase ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowercase ): UpperCAmelCase = target_sizes.numpy() UpperCAmelCase = [] for idx in range(len(lowercase ) ): UpperCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase ) UpperCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase ) else: UpperCAmelCase = logits.argmax(dim=1 ) UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
34
0
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _snake_case = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 16_000 ): '''simple docstring''' _lowerCAmelCase : List[str] = int(round(sample_rate * max_length ) ) if len(_lowerCamelCase ) <= sample_length: return wav _lowerCAmelCase : Tuple = randint(0 , len(_lowerCamelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class UpperCAmelCase_ : lowerCamelCase__ = field(default=a , metadata={'help': 'Name of a dataset from the datasets package'}) lowerCamelCase__ = field( default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) lowerCamelCase__ = field( default=a , metadata={'help': 'A file containing the training audio paths and labels.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'A file containing the validation audio paths and labels.'}) lowerCamelCase__ = field( default='train' , metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } , ) lowerCamelCase__ = field( default='validation' , metadata={ 'help': ( 'The name of the training data set split to use (via the datasets library). Defaults to \'validation\'' ) } , ) lowerCamelCase__ = field( default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , ) lowerCamelCase__ = field( default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''}) lowerCamelCase__ = field( default=a , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) lowerCamelCase__ = field( default=a , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) lowerCamelCase__ = field( default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , ) @dataclass class UpperCAmelCase_ : lowerCamelCase__ = field( default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , ) lowerCamelCase__ = field( default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'}) lowerCamelCase__ = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) lowerCamelCase__ = field( default=a , metadata={'help': 'Name or path of preprocessor config.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'}) lowerCamelCase__ = field( default=a , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) lowerCamelCase__ = field( default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'}) lowerCamelCase__ = field( default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , ) def snake_case__ ( self): '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder`" "instead. Setting `freeze_feature_encoder==True`.", __a, ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`." "Only make use of `--freeze_feature_encoder`.") def A ( ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_audio_classification" , _lowerCamelCase , _lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowerCAmelCase : Tuple = training_args.get_process_log_level() logger.setLevel(_lowerCamelCase ) transformers.utils.logging.set_verbosity(_lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} " + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _lowerCAmelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCAmelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. _lowerCAmelCase : Tuple = DatasetDict() _lowerCAmelCase : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--audio_column_name` to the correct audio column - one of " F"{', '.join(raw_datasets['train'].column_names )}." ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--label_column_name` to the correct text column - one of " F"{', '.join(raw_datasets['train'].column_names )}." ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. _lowerCAmelCase : Any = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _lowerCAmelCase : Union[str, Any] = feature_extractor.model_input_names[0] def train_transforms(_lowerCamelCase ): _lowerCAmelCase : Dict = [] for audio in batch[data_args.audio_column_name]: _lowerCAmelCase : Any = random_subsample( audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_lowerCamelCase ) _lowerCAmelCase : List[str] = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate ) _lowerCAmelCase : Optional[int] = {model_input_name: inputs.get(_lowerCamelCase )} _lowerCAmelCase : List[str] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_lowerCamelCase ): _lowerCAmelCase : Union[str, Any] = [audio["array"] for audio in batch[data_args.audio_column_name]] _lowerCAmelCase : Tuple = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate ) _lowerCAmelCase : List[str] = {model_input_name: inputs.get(_lowerCamelCase )} _lowerCAmelCase : Union[str, Any] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _lowerCAmelCase : int = raw_datasets["train"].features[data_args.label_column_name].names _lowerCAmelCase , _lowerCAmelCase : str = {}, {} for i, label in enumerate(_lowerCamelCase ): _lowerCAmelCase : List[str] = str(_lowerCamelCase ) _lowerCAmelCase : Tuple = label # Load the accuracy metric from the datasets package _lowerCAmelCase : Dict = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_lowerCamelCase ): _lowerCAmelCase : int = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_lowerCamelCase , references=eval_pred.label_ids ) _lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _lowerCAmelCase : Optional[int] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _lowerCAmelCase : Union[str, Any] = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: _lowerCAmelCase : int = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase ) # Initialize our trainer _lowerCAmelCase : Optional[Any] = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , ) # Training if training_args.do_train: _lowerCAmelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCAmelCase : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCAmelCase : Union[str, Any] = last_checkpoint _lowerCAmelCase : Optional[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCAmelCase : Dict = trainer.evaluate() trainer.log_metrics("eval" , _lowerCamelCase ) trainer.save_metrics("eval" , _lowerCamelCase ) # Write model card and (optionally) push to hub _lowerCAmelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCamelCase ) else: trainer.create_model_card(**_lowerCamelCase ) if __name__ == "__main__": main()
36
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A =logging.getLogger(__name__) def snake_case_ (_a : Dict , _a : Union[str, Any] ): return (preds == labels).mean() @dataclass class _a : __a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class _a : __a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) __a : str = field(metadata={"""help""": """Should contain the data files for the task."""} ) __a : int = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def snake_case_ (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _a ) # Set seed set_seed(training_args.seed ) try: UpperCAmelCase = processors[data_args.task_name]() UpperCAmelCase = processor.get_labels() UpperCAmelCase = len(_a ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , ) # Get datasets UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_a : EvalPrediction ) -> Dict: UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_a , p.label_ids )} # Data collator UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCAmelCase = Trainer( model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(_a , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , _a , _a ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_a ) return results def snake_case_ (_a : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
34
0
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCAmelCase = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''), ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[Any] = state_dict.pop(UpperCamelCase ) lowerCAmelCase__ : Tuple = val def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowerCAmelCase__ : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) lowerCAmelCase__ : List[Any] = value else: lowerCAmelCase__ : Optional[int] = value return new_state_dict def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = """""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowerCAmelCase__ : Union[str, Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) lowerCAmelCase__ : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ : Union[str, Any] = in_proj_weight[:256, :] lowerCAmelCase__ : List[Any] = in_proj_bias[:256] lowerCAmelCase__ : str = in_proj_weight[256:512, :] lowerCAmelCase__ : Optional[int] = in_proj_bias[256:512] lowerCAmelCase__ : Union[str, Any] = in_proj_weight[-256:, :] lowerCAmelCase__ : str = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) lowerCAmelCase__ : Optional[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ : Optional[int] = in_proj_weight[:256, :] lowerCAmelCase__ : Dict = in_proj_bias[:256] lowerCAmelCase__ : Any = in_proj_weight[256:512, :] lowerCAmelCase__ : Dict = in_proj_bias[256:512] lowerCAmelCase__ : Tuple = in_proj_weight[-256:, :] lowerCAmelCase__ : Union[str, Any] = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention lowerCAmelCase__ : List[Any] = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict lowerCAmelCase__ : str = in_proj_weight_cross_attn[:256, :] lowerCAmelCase__ : List[Any] = in_proj_bias_cross_attn[:256] lowerCAmelCase__ : Optional[Any] = in_proj_weight_cross_attn[256:512, :] lowerCAmelCase__ : str = in_proj_bias_cross_attn[256:512] lowerCAmelCase__ : Dict = in_proj_weight_cross_attn[-256:, :] lowerCAmelCase__ : str = in_proj_bias_cross_attn[-256:] def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : str = image.size lowerCAmelCase__ : Tuple = max(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : str = 800 if """detection""" in checkpoint_url else 1000 lowerCAmelCase__ : Union[str, Any] = target_max_size / current_max_size lowerCAmelCase__ : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = F.to_tensor(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = F.normalize(UpperCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" logger.info("""Converting model...""" ) # load original state dict lowerCAmelCase__ : str = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" ) # rename keys for src, dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = rename_backbone_keys(UpperCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowerCAmelCase__ : List[str] = """model.""" for key in state_dict.copy().keys(): if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): lowerCAmelCase__ : Optional[Any] = state_dict.pop(UpperCamelCase ) lowerCAmelCase__ : int = val # create HuggingFace model and load state dict lowerCAmelCase__ : List[str] = TableTransformerConfig( backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: lowerCAmelCase__ : int = 15 lowerCAmelCase__ : Tuple = 2 lowerCAmelCase__ : Optional[Any] = {0: """table""", 1: """table rotated"""} lowerCAmelCase__ : Union[str, Any] = idalabel lowerCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()} else: lowerCAmelCase__ : Optional[Any] = 125 lowerCAmelCase__ : int = 6 lowerCAmelCase__ : Union[str, Any] = { 0: """table""", 1: """table column""", 2: """table row""", 3: """table column header""", 4: """table projected row header""", 5: """table spanning cell""", } lowerCAmelCase__ : Any = idalabel lowerCAmelCase__ : Any = {v: k for k, v in idalabel.items()} lowerCAmelCase__ : List[Any] = DetrImageProcessor( format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 ) lowerCAmelCase__ : Optional[int] = TableTransformerForObjectDetection(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # verify our conversion lowerCAmelCase__ : List[Any] = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png""" lowerCAmelCase__ : int = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=UpperCamelCase ) lowerCAmelCase__ : List[str] = Image.open(UpperCamelCase ).convert("""RGB""" ) lowerCAmelCase__ : Union[str, Any] = normalize(resize(UpperCamelCase , UpperCamelCase ) ).unsqueeze(0 ) lowerCAmelCase__ : Tuple = model(UpperCamelCase ) if "detection" in checkpoint_url: lowerCAmelCase__ : List[str] = (1, 15, 3) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) lowerCAmelCase__ : Optional[Any] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: lowerCAmelCase__ : Any = (1, 125, 7) lowerCAmelCase__ : str = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) lowerCAmelCase__ : List[str] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) image_processor.save_pretrained(UpperCamelCase ) if push_to_hub: # Push model to HF hub logger.info("""Pushing model to the hub...""" ) lowerCAmelCase__ : Optional[Any] = ( """microsoft/table-transformer-detection""" if """detection""" in checkpoint_url else """microsoft/table-transformer-structure-recognition""" ) model.push_to_hub(UpperCamelCase ) image_processor.push_to_hub(UpperCamelCase ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', type=str, choices=[ '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''', ], help='''URL of the Table Transformer checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowerCAmelCase = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
37
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a ( __a ): __a : int = ["""image_processor""", """tokenizer"""] __a : Union[str, Any] = """ChineseCLIPImageProcessor""" __a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowercase , lowercase ) UpperCAmelCase = self.image_processor def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase ) if images is not None: UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase ) def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*lowercase , **lowercase ) @property def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self : List[Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , ) return self.image_processor_class
34
0
# limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( '''pipelines_utils''', '''0.22.0''', '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''', standard_warn=False, stacklevel=3, )
38
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging A =logging.get_logger(__name__) A ={ 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _a ( __a ): __a : List[Any] = """marian""" __a : Union[str, Any] = ["""past_key_values"""] __a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : List[Any] , lowercase : Union[str, Any]=58_101 , lowercase : Tuple=None , lowercase : str=1_024 , lowercase : Optional[int]=12 , lowercase : Optional[int]=4_096 , lowercase : int=16 , lowercase : List[Any]=12 , lowercase : int=4_096 , lowercase : Optional[int]=16 , lowercase : int=0.0 , lowercase : Tuple=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]="gelu" , lowercase : Tuple=1_024 , lowercase : str=0.1 , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.02 , lowercase : Union[str, Any]=58_100 , lowercase : List[str]=False , lowercase : str=58_100 , lowercase : Any=0 , lowercase : Optional[Any]=0 , lowercase : Tuple=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = vocab_size UpperCAmelCase = decoder_vocab_size or vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , ) class _a ( __a ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def A ( self : int ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def A ( self : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super().outputs else: UpperCAmelCase = super(lowercase , self ).outputs if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) # Generate decoder inputs UpperCAmelCase = seq_length if not self.use_past else 1 UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase = dict(**lowercase , **lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1] UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = decoder_seq_length + 3 UpperCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 ) UpperCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase = min(lowercase , lowercase ) UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), ) ) # TODO: test this. UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase , lowercase ): common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) ) return common_inputs def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase = seqlen + 2 UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = common_inputs['''attention_mask'''].dtype UpperCAmelCase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) UpperCAmelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase ) ] return common_inputs def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase ) UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) ) return common_inputs def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) else: UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) return common_inputs def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase ) else: UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_( lowercase , lowercase , lowercase , lowercase ) @property def A ( self : Any ): '''simple docstring''' return 1E-4
34
0
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowerCamelCase ( unittest.TestCase): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=18 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=[0.5, 0.5, 0.5] , UpperCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20} _UpperCAmelCase = do_thumbnail _UpperCAmelCase = do_align_axis _UpperCAmelCase = do_pad _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def UpperCamelCase ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowerCamelCase ( snake_case__ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = DonutImageProcessor if is_vision_available() else None def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = DonutImageProcessingTester(self ) @property def UpperCamelCase ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_thumbnail' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_align_long_axis' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_pad' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) ) def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def UpperCamelCase ( self ): """simple docstring""" pass @is_flaky() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def UpperCamelCase ( self ): """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
39
'''simple docstring''' import os def snake_case_ (): UpperCAmelCase = os.path.join(os.path.dirname(_a ) , '''num.txt''' ) with open(_a ) as file_hand: return str(sum(int(_a ) for line in file_hand ) )[:1_0] if __name__ == "__main__": print(solution())
34
0
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __lowercase = logging.get_logger(__name__) class _A : """simple docstring""" def __init__( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Any): a : List[str] = question_encoder a : Union[str, Any] = generator a : Dict = self.question_encoder def __snake_case ( self : List[Any] , __UpperCAmelCase : Any): if os.path.isfile(__UpperCAmelCase): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''') os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase) a : Tuple = os.path.join(__UpperCAmelCase , "question_encoder_tokenizer") a : Optional[Any] = os.path.join(__UpperCAmelCase , "generator_tokenizer") self.question_encoder.save_pretrained(__UpperCAmelCase) self.generator.save_pretrained(__UpperCAmelCase) @classmethod def __snake_case ( cls : Optional[Any] , __UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Optional[Any]): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer a : int = kwargs.pop("config" , __UpperCAmelCase) if config is None: a : Union[str, Any] = RagConfig.from_pretrained(__UpperCAmelCase) a : str = AutoTokenizer.from_pretrained( __UpperCAmelCase , config=config.question_encoder , subfolder="question_encoder_tokenizer") a : Optional[Any] = AutoTokenizer.from_pretrained( __UpperCAmelCase , config=config.generator , subfolder="generator_tokenizer") return cls(question_encoder=__UpperCAmelCase , generator=__UpperCAmelCase) def __call__( self : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : int): return self.current_tokenizer(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : List[Any] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Tuple): return self.generator.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : List[str] , *__UpperCAmelCase : str , **__UpperCAmelCase : List[Any]): return self.generator.decode(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : str): a : Optional[int] = self.question_encoder def __snake_case ( self : Optional[int]): a : str = self.generator def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : str = "longest" , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , **__UpperCAmelCase : List[Any] , ): warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. See the documentation of your specific tokenizer for more " "details" , __UpperCAmelCase , ) if max_length is None: a : str = self.current_tokenizer.model_max_length a : int = self( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , **__UpperCAmelCase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: a : str = self.current_tokenizer.model_max_length a : int = self( text_target=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , **__UpperCAmelCase , ) a : str = labels["input_ids"] return model_inputs
40
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version A =logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') A ={ 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization A ={ 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } A =sorted(arg_to_scheduler.keys()) A ='{' + ', '.join(arg_to_scheduler_choices) + '}' class _a ( pl.LightningModule ): def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase ) UpperCAmelCase = 0 UpperCAmelCase = Path(self.hparams.output_dir ) UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: UpperCAmelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , ) else: UpperCAmelCase = config UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase , lowercase ): assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute" setattr(self.config , lowercase , getattr(self.hparams , lowercase ) ) if tokenizer is None: UpperCAmelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , ) else: UpperCAmelCase = tokenizer UpperCAmelCase = MODEL_MODES[mode] if model is None: UpperCAmelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , ) else: UpperCAmelCase = model def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler] UpperCAmelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.model UpperCAmelCase = ['''bias''', '''LayerNorm.weight'''] UpperCAmelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: UpperCAmelCase = Adafactor( lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase ) else: UpperCAmelCase = AdamW( lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) UpperCAmelCase = optimizer UpperCAmelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def A ( self : List[Any] , lowercase : int , lowercase : List[str] ): '''simple docstring''' return self.validation_step(lowercase , lowercase ) def A ( self : List[Any] , lowercase : Tuple ): '''simple docstring''' return self.validation_end(lowercase ) def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A ( self : List[str] , lowercase : Any ): '''simple docstring''' if stage == "test": UpperCAmelCase = len(self.test_dataloader().dataset ) else: UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase ) UpperCAmelCase = len(self.train_dataloader().dataset ) def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def A ( self : Union[str, Any] ): '''simple docstring''' return self.train_loader def A ( self : Optional[Any] ): '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : List[Any] ): '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : Any , lowercase : Union[str, Any] ): '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A ( self : List[str] , lowercase : Dict[str, Any] ): '''simple docstring''' UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' ) UpperCAmelCase = self.step_count self.model.save_pretrained(lowercase ) self.tokenizer.save_pretrained(lowercase ) @staticmethod def A ( lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class _a ( pl.Callback ): def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase ) class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler'''] UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase ) def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log results for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log and save results to file UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase , '''w''' ) as writer: for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def snake_case_ (_a : int , _a : Optional[Any] ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '''--output_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=_a , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , ) def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ): pl.seed_everything(args.seed ) # init model UpperCAmelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_a ) # add custom checkpoints if checkpoint_callback is None: UpperCAmelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_a ) if logging_callback is None: UpperCAmelCase = LoggingCallback() UpperCAmelCase = {} if args.fpaa: UpperCAmelCase = 1_6 if args.gpus > 1: UpperCAmelCase = '''auto''' UpperCAmelCase = '''ddp''' UpperCAmelCase = args.accumulate_grad_batches UpperCAmelCase = None UpperCAmelCase = '''auto''' UpperCAmelCase = pl.Trainer.from_argparse_args( _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , ) if args.do_train: trainer.fit(_a ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
34
0
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Optional[int]: stooge(UpperCamelCase , 0 , len(UpperCamelCase ) - 1 ) return arr def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: lowerCamelCase__ , lowerCamelCase__ : List[Any] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: lowerCamelCase__ : Optional[int] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(UpperCamelCase , UpperCamelCase , (h - t) ) # Recursively sort last 2/3 elements stooge(UpperCamelCase , i + t , (UpperCamelCase) ) # Recursively sort first 2/3 elements stooge(UpperCamelCase , UpperCamelCase , (h - t) ) if __name__ == "__main__": _A : Union[str, Any] =input('''Enter numbers separated by a comma:\n''').strip() _A : int =[int(item) for item in user_input.split(''',''')] print(stooge_sort(unsorted))
41
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue UpperCAmelCase = cst_fwd.get(_a , np.inf ) UpperCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) UpperCAmelCase = new_cost_f UpperCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ): UpperCAmelCase = -1 UpperCAmelCase = set() UpperCAmelCase = set() UpperCAmelCase = {source: 0} UpperCAmelCase = {destination: 0} UpperCAmelCase = {source: None} UpperCAmelCase = {destination: None} UpperCAmelCase = PriorityQueue() UpperCAmelCase = PriorityQueue() UpperCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): UpperCAmelCase , UpperCAmelCase = queue_forward.get() visited_forward.add(_a ) UpperCAmelCase , UpperCAmelCase = queue_backward.get() visited_backward.add(_a ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: UpperCAmelCase = shortest_distance return shortest_path_distance A ={ 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } A ={ 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
34
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase : str = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST", "UniSpeechForCTC", "UniSpeechForPreTraining", "UniSpeechForSequenceClassification", "UniSpeechModel", "UniSpeechPreTrainedModel", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
42
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) def snake_case_ (_a : List[str] ): UpperCAmelCase = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase = 1_2_8 elif "12-12" in model_name: UpperCAmelCase = 1_2 UpperCAmelCase = 1_2 elif "14-14" in model_name: UpperCAmelCase = 1_4 UpperCAmelCase = 1_4 elif "16-16" in model_name: UpperCAmelCase = 1_6 UpperCAmelCase = 1_6 else: raise ValueError('''Model not supported''' ) UpperCAmelCase = '''huggingface/label-files''' if "speech-commands" in model_name: UpperCAmelCase = 3_5 UpperCAmelCase = '''speech-commands-v2-id2label.json''' else: UpperCAmelCase = 5_2_7 UpperCAmelCase = '''audioset-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (_a : Tuple ): if "module.v" in name: UpperCAmelCase = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: UpperCAmelCase = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: UpperCAmelCase = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def snake_case_ (_a : Dict , _a : List[Any] ): for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(_a ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[3] ) UpperCAmelCase = config.hidden_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[dim : dim * 2, :] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def snake_case_ (_a : Tuple ): UpperCAmelCase = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(_a , _a ) @torch.no_grad() def snake_case_ (_a : int , _a : Union[str, Any] , _a : Dict=False ): UpperCAmelCase = get_audio_spectrogram_transformer_config(_a ) UpperCAmelCase = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict UpperCAmelCase = model_name_to_url[model_name] UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' ) # remove some keys remove_keys(_a ) # rename some keys UpperCAmelCase = convert_state_dict(_a , _a ) # load 🤗 model UpperCAmelCase = ASTForAudioClassification(_a ) model.eval() model.load_state_dict(_a ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 UpperCAmelCase = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 UpperCAmelCase = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8 UpperCAmelCase = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a ) if "speech-commands" in model_name: UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) UpperCAmelCase = dataset[0]['''audio''']['''array'''] else: UpperCAmelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) UpperCAmelCase , UpperCAmelCase = torchaudio.load(_a ) UpperCAmelCase = waveform.squeeze().numpy() UpperCAmelCase = feature_extractor(_a , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' ) # forward pass UpperCAmelCase = model(**_a ) UpperCAmelCase = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_a ).mkdir(exist_ok=_a ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_a ) print(F"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_a ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F"MIT/{model_name}" ) feature_extractor.push_to_hub(F"MIT/{model_name}" ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
34
0
from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :int = [] for i in range(len(SCREAMING_SNAKE_CASE ) ): __UpperCamelCase :Dict = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours __UpperCamelCase :List[str] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(SCREAMING_SNAKE_CASE ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(SCREAMING_SNAKE_CASE ) - 1: neighbour_count += cells[i + 1][j] if i < len(SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. __UpperCamelCase :List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(SCREAMING_SNAKE_CASE ) return next_generation def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :Optional[Any] = [] for _ in range(SCREAMING_SNAKE_CASE ): # Create output image __UpperCamelCase :Dict = Image.new('''RGB''' , (len(cells[0] ), len(SCREAMING_SNAKE_CASE )) ) __UpperCamelCase :Any = img.load() # Save cells to image for x in range(len(SCREAMING_SNAKE_CASE ) ): for y in range(len(cells[0] ) ): __UpperCamelCase :Optional[Any] = 255 - cells[y][x] * 255 __UpperCamelCase :int = (colour, colour, colour) # Save image images.append(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = new_generation(SCREAMING_SNAKE_CASE ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
43
'''simple docstring''' from __future__ import annotations def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : List[str] ): # noqa: E741 while r - l > 1: UpperCAmelCase = (l + r) // 2 if v[m] >= key: UpperCAmelCase = m else: UpperCAmelCase = m # noqa: E741 return r def snake_case_ (_a : list[int] ): if len(_a ) == 0: return 0 UpperCAmelCase = [0] * len(_a ) UpperCAmelCase = 1 UpperCAmelCase = v[0] for i in range(1 , len(_a ) ): if v[i] < tail[0]: UpperCAmelCase = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase = v[i] length += 1 else: UpperCAmelCase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
34
0
"""simple docstring""" from manim import * class __A ( SCREAMING_SNAKE_CASE_ ): def __A ( self ): _lowerCAmelCase : str = Rectangle(height=0.5 , width=0.5 ) _lowerCAmelCase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) _lowerCAmelCase : Tuple = Rectangle(height=0.2_5 , width=0.2_5 ) _lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )] _lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )] _lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : Dict = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : str = VGroup(a__ , a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : str = Text("""CPU""" , font_size=24 ) _lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(a__ ) _lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(4 )] _lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : str = Text("""GPU""" , font_size=24 ) _lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) gpu.move_to([-1, -1, 0] ) self.add(a__ ) _lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )] _lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : int = Text("""Model""" , font_size=24 ) _lowerCAmelCase : int = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) model.move_to([3, -1.0, 0] ) self.add(a__ ) _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : str = [] for i, rect in enumerate(a__ ): _lowerCAmelCase : int = fill.copy().set_fill(a__ , opacity=0.8 ) target.move_to(a__ ) model_arr.append(a__ ) _lowerCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(a__ ) self.add(*a__ , *a__ ) _lowerCAmelCase : str = [meta_mem.copy() for i in range(6 )] _lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )] _lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : int = VGroup(a__ , a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : Dict = Text("""Disk""" , font_size=24 ) _lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) disk.move_to([-4, -1.2_5, 0] ) self.add(a__ , a__ ) _lowerCAmelCase : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _lowerCAmelCase : Dict = MarkupText( F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(a__ , a__ ) _lowerCAmelCase : List[str] = MarkupText( F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(a__ ) _lowerCAmelCase : List[str] = MarkupText( F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ ) ) _lowerCAmelCase : Union[str, Any] = Square(0.3 ) input.set_fill(a__ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , a__ , buff=0.5 ) self.play(Write(a__ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=a__ , buff=0.0_2 ) self.play(MoveToTarget(a__ ) ) self.play(FadeOut(a__ ) ) _lowerCAmelCase : Dict = Arrow(start=a__ , end=a__ , color=a__ , buff=0.5 ) a.next_to(model_arr[0].get_left() , a__ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _lowerCAmelCase : Optional[Any] = MarkupText( F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ , run_time=3 ) ) _lowerCAmelCase : Optional[int] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.0_2} self.play( Write(a__ ) , Circumscribe(model_arr[0] , color=a__ , **a__ ) , Circumscribe(model_cpu_arr[0] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _lowerCAmelCase : Any = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , a__ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) _lowerCAmelCase : Union[str, Any] = AnimationGroup( FadeOut(a__ , run_time=0.5 ) , MoveToTarget(a__ , run_time=0.5 ) , FadeIn(a__ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(a__ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _lowerCAmelCase : int = 0.7 self.play( Circumscribe(model_arr[i] , **a__ ) , Circumscribe(cpu_left_col_base[i] , **a__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , Circumscribe(model_arr[i + 1] , color=a__ , **a__ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=a__ , **a__ ) , Circumscribe(cpu_left_col_base[-1] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _lowerCAmelCase : Any = a_c _lowerCAmelCase : Any = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(a__ ) , FadeOut(a__ , run_time=0.5 ) , ) _lowerCAmelCase : List[str] = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ , run_time=3 ) , MoveToTarget(a__ ) ) self.wait()
44
'''simple docstring''' def snake_case_ (_a : str , _a : str ): UpperCAmelCase = len(_a ) + 1 UpperCAmelCase = len(_a ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )] # since string of zero length match pattern of zero length UpperCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _a ): UpperCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _a ): UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _a ): for j in range(1 , _a ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": UpperCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: UpperCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): UpperCAmelCase = dp[i - 1][j] else: UpperCAmelCase = 0 else: UpperCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") A ='aab' A ='c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
34
0
"""simple docstring""" from __future__ import annotations def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None: __a = len(lowerCAmelCase__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(lowerCAmelCase__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , ) def lowercase ( lowerCAmelCase__ : int ) -> None: __a = [] depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ ) # Print all the boards for board in boards: for column in board: print(lowerCAmelCase__ ) print('''''' ) print(len(lowerCAmelCase__ ) , '''solutions were found.''' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
45
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A ='pt' elif is_tf_available(): A ='tf' else: A ='jax' class _a ( __a , unittest.TestCase ): __a : Optional[Any] = PerceiverTokenizer __a : str = False def A ( self : Union[str, Any] ): '''simple docstring''' super().setUp() UpperCAmelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : Optional[int] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self : Union[str, Any] , **lowercase : int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ): '''simple docstring''' UpperCAmelCase = [] for i in range(len(lowercase ) ): try: UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) ) UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: UpperCAmelCase = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase = [t[0] for t in toks] # Ensure consistency UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: UpperCAmelCase = ''' ''' + output_txt UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = '''Unicode €.''' UpperCAmelCase = tokenizer(lowercase ) UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' ) UpperCAmelCase = tokenizer('''e è é ê ë''' ) UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) if FRAMEWORK != "jax": UpperCAmelCase = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , lowercase ) self.assertIn('''attention_mask''' , lowercase ) self.assertNotIn('''decoder_input_ids''' , lowercase ) self.assertNotIn('''decoder_attention_mask''' , lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] UpperCAmelCase = tokenizer( text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) UpperCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) UpperCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )] UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def A ( self : Union[str, Any] ): '''simple docstring''' pass def A ( self : Any ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' pass def A ( self : str ): '''simple docstring''' pass def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(lowercase , lowercase )
34
0
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("Googling.....") SCREAMING_SNAKE_CASE__ = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) SCREAMING_SNAKE_CASE__ = requests.get(url, headers={"UserAgent": UserAgent().random}) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) SCREAMING_SNAKE_CASE__ = BeautifulSoup(res.text, "html.parser") SCREAMING_SNAKE_CASE__ = list(soup.select(".eZt8xd"))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("href")) else: webbrowser.open(f'https://google.com{link.get("href")}')
46
'''simple docstring''' import os from distutils.util import strtobool def snake_case_ (_a : Union[str, Any] , _a : List[Any] ): for e in env_keys: UpperCAmelCase = int(os.environ.get(_a , -1 ) ) if val >= 0: return val return default def snake_case_ (_a : Dict , _a : Any=False ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return strtobool(_a ) == 1 # As its name indicates `strtobool` actually returns an int... def snake_case_ (_a : str , _a : Optional[Any]="no" ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return value
34
0
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self : List[Any] , _a : List[str] , _a : Optional[Any]=2 , _a : List[Any]=8 , _a : Tuple=True , _a : Dict=True , _a : Any=True , _a : int=True , _a : List[str]=99 , _a : int=16 , _a : Union[str, Any]=5 , _a : Optional[int]=2 , _a : Optional[Any]=36 , _a : List[str]="gelu" , _a : Any=0.0 , _a : str=0.0 , _a : Optional[Any]=512 , _a : Tuple=16 , _a : Optional[int]=2 , _a : int=0.02 , _a : int=3 , _a : Optional[Any]=4 , _a : Dict=None , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =seq_length _SCREAMING_SNAKE_CASE =is_training _SCREAMING_SNAKE_CASE =use_input_mask _SCREAMING_SNAKE_CASE =use_token_type_ids _SCREAMING_SNAKE_CASE =use_labels _SCREAMING_SNAKE_CASE =vocab_size _SCREAMING_SNAKE_CASE =hidden_size _SCREAMING_SNAKE_CASE =num_hidden_layers _SCREAMING_SNAKE_CASE =num_attention_heads _SCREAMING_SNAKE_CASE =intermediate_size _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =hidden_dropout_prob _SCREAMING_SNAKE_CASE =attention_probs_dropout_prob _SCREAMING_SNAKE_CASE =max_position_embeddings _SCREAMING_SNAKE_CASE =type_vocab_size _SCREAMING_SNAKE_CASE =type_sequence_label_size _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =num_labels _SCREAMING_SNAKE_CASE =num_choices _SCREAMING_SNAKE_CASE =scope def A ( self : Any ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE =None if self.use_input_mask: _SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE =None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None if self.use_labels: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : int ) -> Tuple: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) def A ( self : Dict ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.get_config() _SCREAMING_SNAKE_CASE =300 return config def A ( self : str ) -> List[str]: '''simple docstring''' ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) =self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def A ( self : Any , _a : List[Any] , _a : List[Any] , _a : Union[str, Any] , _a : str , _a : List[Any] , _a : Optional[Any] , _a : List[str] ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraModel(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a ) _SCREAMING_SNAKE_CASE =model(_a , token_type_ids=_a ) _SCREAMING_SNAKE_CASE =model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : int , _a : List[Any] , _a : List[str] , _a : List[str] , _a : Dict , _a : Optional[Any] , _a : Any , _a : List[Any] , _a : Union[str, Any] , _a : List[str] , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =MraModel(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , ) _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , ) _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : List[str] , _a : Optional[Any] , _a : Dict , _a : Optional[int] , _a : str , _a : Optional[int] , _a : Optional[Any] , _a : Tuple ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForMaskedLM(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : List[str] , _a : Optional[int] , _a : List[Any] , _a : Union[str, Any] , _a : Union[str, Any] , _a : Optional[int] , _a : Dict , _a : List[str] ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForQuestionAnswering(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[str] , _a : Optional[int] , _a : int , _a : Any , _a : str , _a : str , _a : Optional[int] , _a : Dict ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MraForSequenceClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Optional[Any] , _a : List[Any] , _a : List[str] , _a : Any , _a : List[Any] , _a : List[str] , _a : int , _a : Dict ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MraForTokenClassification(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : List[str] , _a : int , _a : Optional[int] , _a : int , _a : Dict , _a : Union[str, Any] , _a : Optional[int] , _a : Any ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_choices _SCREAMING_SNAKE_CASE =MraForMultipleChoice(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE =model( _a , attention_mask=_a , token_type_ids=_a , labels=_a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Tuple ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) =config_and_inputs _SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( A__ , unittest.TestCase ): A__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) A__ = False A__ = False A__ = False A__ = False A__ = () def A ( self : Dict ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraModelTester(self ) _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 ) def A ( self : Tuple ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def A ( self : Dict ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def A ( self : List[str] ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE =type self.model_tester.create_and_check_model(*_a ) def A ( self : str ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def A ( self : List[str] ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def A ( self : Optional[int] ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def A ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def A ( self : List[str] ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def A ( self : int ) -> List[Any]: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE =MraModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip(reason='MRA does not output attentions' ) def A ( self : Optional[int] ) -> Tuple: '''simple docstring''' return @require_torch class A__ ( unittest.TestCase ): @slow def A ( self : Optional[Any] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraModel.from_pretrained('uw-madison/mra-base-512-4' ) _SCREAMING_SNAKE_CASE =torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) ) @slow def A ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) _SCREAMING_SNAKE_CASE =torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =5_0265 _SCREAMING_SNAKE_CASE =torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) ) @slow def A ( self : List[Any] ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) _SCREAMING_SNAKE_CASE =torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(_a )[0] _SCREAMING_SNAKE_CASE =5_0265 _SCREAMING_SNAKE_CASE =torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
47
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A =logging.getLogger(__name__) A ='Hello world! cécé herlolip' A =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def snake_case_ (_a : List[Any] , _a : Any ): UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) UpperCAmelCase = torch.load(_a , lambda _a , _a : storage ) UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a ) original.eval() UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase = encoder_input_ids UpperCAmelCase = decoder_input_ids UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0] UpperCAmelCase = original.generator(_a ) UpperCAmelCase = new_model( _a , _a , _a , _a , _a )[0] UpperCAmelCase = new_model.generator(_a ) UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) A =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
34
0
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = ["""input_values""", """attention_mask"""] def __init__( self , UpperCamelCase__ = 1 , UpperCamelCase__ = 1_6000 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = False , UpperCamelCase__ = 80 , UpperCamelCase__ = 16 , UpperCamelCase__ = 64 , UpperCamelCase__ = "hann_window" , UpperCamelCase__ = 1.0 , UpperCamelCase__ = 80 , UpperCamelCase__ = 7600 , UpperCamelCase__ = 1e-10 , UpperCamelCase__ = 2 , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> Dict: super().__init__(feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : Any = do_normalize lowerCamelCase : Tuple = return_attention_mask lowerCamelCase : Optional[Any] = num_mel_bins lowerCamelCase : Optional[int] = hop_length lowerCamelCase : Dict = win_length lowerCamelCase : Any = win_function lowerCamelCase : Any = frame_signal_scale lowerCamelCase : int = fmin lowerCamelCase : int = fmax lowerCamelCase : Optional[int] = mel_floor lowerCamelCase : Any = reduction_factor lowerCamelCase : Tuple = win_length * sampling_rate // 1000 lowerCamelCase : int = hop_length * sampling_rate // 1000 lowerCamelCase : int = optimal_fft_length(self.sample_size ) lowerCamelCase : List[str] = (self.n_fft // 2) + 1 lowerCamelCase : List[str] = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCamelCase__ ) lowerCamelCase : int = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase__ , ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase__ , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _lowercase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: lowerCamelCase : List[Any] = np.array(UpperCamelCase__ , np.intaa ) lowerCamelCase : str = [] for vector, length in zip(UpperCamelCase__ , attention_mask.sum(-1 ) ): lowerCamelCase : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: lowerCamelCase : List[Any] = padding_value normed_input_values.append(UpperCamelCase__ ) else: lowerCamelCase : str = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def _lowercase ( self , UpperCamelCase__ , ) -> np.ndarray: lowerCamelCase : Optional[int] = spectrogram( UpperCamelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , ) return log_mel_spec.T def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchFeature: if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values." ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: lowerCamelCase : Dict = self._process_audio( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) else: lowerCamelCase : Dict = None if audio_target is not None: lowerCamelCase : Optional[int] = self._process_audio( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) if inputs is None: return inputs_target else: lowerCamelCase : Optional[Any] = inputs_target["input_values"] lowerCamelCase : List[str] = inputs_target.get("attention_mask" ) if decoder_attention_mask is not None: lowerCamelCase : Dict = decoder_attention_mask return inputs def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchFeature: lowerCamelCase : Dict = isinstance(UpperCamelCase__ , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowerCamelCase : Optional[int] = is_batched_numpy or ( isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase : Dict = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ): lowerCamelCase : Optional[int] = np.asarray(UpperCamelCase__ , dtype=np.floataa ) elif isinstance(UpperCamelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): lowerCamelCase : str = speech.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase : List[Any] = [speech] # needed to make pad() work on spectrogram inputs lowerCamelCase : Any = self.feature_size # convert into correct format for padding if is_target: lowerCamelCase : List[Any] = [self._extract_mel_features(UpperCamelCase__ ) for waveform in speech] lowerCamelCase : Union[str, Any] = BatchFeature({"input_values": features} ) lowerCamelCase : Any = self.num_mel_bins else: lowerCamelCase : List[str] = BatchFeature({"input_values": speech} ) lowerCamelCase : Tuple = self.pad( UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) lowerCamelCase : Optional[int] = feature_size_hack # convert input values to correct format lowerCamelCase : Optional[Any] = padded_inputs["input_values"] if not isinstance(input_values[0] , np.ndarray ): lowerCamelCase : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for array in input_values] elif ( not isinstance(UpperCamelCase__ , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): lowerCamelCase : Any = [array.astype(np.floataa ) for array in input_values] elif isinstance(UpperCamelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): lowerCamelCase : int = input_values.astype(np.floataa ) # convert attention_mask to correct format lowerCamelCase : Any = padded_inputs.get("attention_mask" ) if attention_mask is not None: lowerCamelCase : Dict = [np.asarray(UpperCamelCase__ , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: lowerCamelCase : Any = ( attention_mask if self._get_padding_strategies(UpperCamelCase__ , max_length=UpperCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD else None ) lowerCamelCase : Any = self.zero_mean_unit_var_norm( padded_inputs["input_values"] , attention_mask=UpperCamelCase__ , padding_value=self.padding_value ) if return_tensors is not None: lowerCamelCase : Tuple = padded_inputs.convert_to_tensors(UpperCamelCase__ ) return padded_inputs def _lowercase ( self ) -> Dict[str, Any]: lowerCamelCase : Optional[int] = super().to_dict() # Don't serialize these as they are derived from the other properties. lowerCamelCase : Dict = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output
48
'''simple docstring''' from ..utils import DummyObject, requires_backends class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : Any = ["""flax""", """transformers"""] def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] )
34
0
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a = len(_UpperCAmelCase ) __a = [[0] * n for i in range(_UpperCAmelCase )] for i in range(_UpperCAmelCase ): __a = y_points[i] for i in range(2 , _UpperCAmelCase ): for j in range(_UpperCAmelCase , _UpperCAmelCase ): __a = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
49
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) A ={ 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } A ={ 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def snake_case_ (_a : List[str] ): UpperCAmelCase = EfficientNetConfig() UpperCAmelCase = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCAmelCase = CONFIG_MAP[model_name]['''width_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''depth_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dw_padding'''] UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = 1_0_0_0 UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(_a , stream=_a ).raw ) return im def snake_case_ (_a : str ): UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_a , ) return preprocessor def snake_case_ (_a : Optional[Any] ): UpperCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase = sorted(set(_a ) ) UpperCAmelCase = len(_a ) UpperCAmelCase = {b: str(_a ) for b, i in zip(_a , range(_a ) )} UpperCAmelCase = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase = '''efficientnet.''' + item[1] UpperCAmelCase = '''classifier.weight''' UpperCAmelCase = '''classifier.bias''' return key_mapping def snake_case_ (_a : Dict , _a : List[str] , _a : Dict ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase = torch.from_numpy(np.transpose(_a ) ) else: UpperCAmelCase = torch.from_numpy(_a ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_a ) @torch.no_grad() def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ): UpperCAmelCase = model_classes[model_name]( include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , ) UpperCAmelCase = original_model.trainable_variables UpperCAmelCase = original_model.non_trainable_variables UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase = param.numpy() UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase = get_efficientnet_config(_a ) UpperCAmelCase = EfficientNetForImageClassification(_a ).eval() UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase = rename_keys(_a ) replace_params(_a , _a , _a ) # Initialize preprocessor and preprocess input image UpperCAmelCase = convert_image_processor(_a ) UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase = hf_model(**_a ) UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase = False UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase = image.img_to_array(_a ) UpperCAmelCase = np.expand_dims(_a , axis=0 ) UpperCAmelCase = original_model.predict(_a ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_a ): os.mkdir(_a ) # Save converted model and image processor hf_model.save_pretrained(_a ) preprocessor.save_pretrained(_a ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." ) UpperCAmelCase = F"efficientnet-{model_name}" preprocessor.push_to_hub(_a ) hf_model.push_to_hub(_a ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') A =parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
34
0
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCAmelCase : List[Any] = 16 _UpperCAmelCase : Any = 32 def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = 16 ) -> List[str]: lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('bert-base-cased' ) lowerCamelCase__ : List[Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(_UpperCAmelCase ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase__ : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase__ : List[Any] = datasets.map( _UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase__ : List[Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_UpperCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase__ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase__ : int = 16 elif accelerator.mixed_precision != "no": lowerCamelCase__ : Dict = 8 else: lowerCamelCase__ : Optional[int] = None return tokenizer.pad( _UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , ) # Instantiate dataloaders. lowerCamelCase__ : Optional[int] = DataLoader( tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) lowerCamelCase__ : str = DataLoader( tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCAmelCase : Optional[Any] = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> str: # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1": lowerCamelCase__ : Optional[int] = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCamelCase__ : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: lowerCamelCase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase__ : Optional[Any] = config['lr'] lowerCamelCase__ : Tuple = int(config['num_epochs'] ) lowerCamelCase__ : Union[str, Any] = int(config['seed'] ) lowerCamelCase__ : str = int(config['batch_size'] ) set_seed(_UpperCAmelCase ) lowerCamelCase__ , lowerCamelCase__ : Optional[int] = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase ) lowerCamelCase__ : Optional[int] = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation lowerCamelCase__ : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCamelCase__ : Any = batch_size // MAX_GPU_BATCH_SIZE lowerCamelCase__ : str = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase__ : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase__ : List[str] = AdamW(params=model.parameters() , lr=_UpperCAmelCase ) # Instantiate scheduler lowerCamelCase__ : List[Any] = get_linear_schedule_with_warmup( optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = accelerator.prepare( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: lowerCamelCase__ : Optional[Any] = os.path.split(_UpperCAmelCase )[-1].split('.' )[0] accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase ) # Now we train the model for epoch in range(_UpperCAmelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCamelCase__ : Union[str, Any] = 0 for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCamelCase__ : Any = model(**_UpperCAmelCase ) lowerCamelCase__ : Dict = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCamelCase__ : Dict = loss / gradient_accumulation_steps accelerator.backward(_UpperCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase__ : str = model(**_UpperCAmelCase ) lowerCamelCase__ : List[str] = outputs.logits.argmax(dim=-1 ) lowerCamelCase__ , lowerCamelCase__ : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=_UpperCAmelCase , references=_UpperCAmelCase , ) lowerCamelCase__ : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _UpperCAmelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { 'accuracy': eval_metric['accuracy'], 'f1': eval_metric['f1'], 'train_loss': total_loss.item() / len(_UpperCAmelCase ), 'epoch': epoch, } , step=_UpperCAmelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def SCREAMING_SNAKE_CASE ( ) -> List[str]: lowerCamelCase__ : Any = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=_UpperCAmelCase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) lowerCamelCase__ : Union[str, Any] = parser.parse_args() lowerCamelCase__ : str = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": main()
50
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A =input('Enter image url: ').strip() print(f"""Downloading image from {url} ...""") A =BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image A =soup.find('meta', {'property': 'og:image'})['content'] A =requests.get(image_url).content A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(f"""Done. Image saved to disk as {file_name}.""")
34
0
from typing import List import numpy as np def A (__A : dict ) -> int: """simple docstring""" UpperCAmelCase_ = {key: len(__A ) for key, value in gen_kwargs.items() if isinstance(__A , __A )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) UpperCAmelCase_ = max(lists_lengths.values() , default=0 ) return max(1 , __A ) def A (__A : int , __A : int ) -> List[range]: """simple docstring""" UpperCAmelCase_ = [] for group_idx in range(__A ): UpperCAmelCase_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break UpperCAmelCase_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 UpperCAmelCase_ = range(__A , start + num_shards_to_add ) shards_indices_per_group.append(__A ) return shards_indices_per_group def A (__A : dict , __A : int ) -> List[dict]: """simple docstring""" UpperCAmelCase_ = _number_of_shards_in_gen_kwargs(__A ) if num_shards == 1: return [dict(__A )] else: UpperCAmelCase_ = _distribute_shards(num_shards=__A , max_num_jobs=__A ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__A , __A ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__A ) ) ] def A (__A : List[dict] ) -> dict: """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __A ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def A (__A : np.random.Generator , __A : dict ) -> dict: """simple docstring""" UpperCAmelCase_ = {len(__A ) for value in gen_kwargs.values() if isinstance(__A , __A )} UpperCAmelCase_ = {} for size in list_sizes: UpperCAmelCase_ = list(range(__A ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes UpperCAmelCase_ = dict(__A ) for key, value in shuffled_kwargs.items(): if isinstance(__A , __A ): UpperCAmelCase_ = [value[i] for i in indices_per_size[len(__A )]] return shuffled_kwargs
51
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _a ( __a ): __a : str = ["""vqvae"""] def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ): '''simple docstring''' super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' return 50 if isinstance(self.scheduler , lowercase ) else 1_000 @torch.no_grad() def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ): '''simple docstring''' UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase , device=self.device , ) UpperCAmelCase = noise UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase , lowercase ) UpperCAmelCase = self.mel.audio_slice_to_image(lowercase ) UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase = (input_image / 255) * 2 - 1 UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample( generator=lowercase )[0] UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase = int(mask_start_secs * pixels_per_second ) UpperCAmelCase = int(mask_end_secs * pixels_per_second ) UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase ): UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample'''] else: UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] if isinstance(self.scheduler , lowercase ): UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample'''] else: UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase = self.vqvae.decode(lowercase )['''sample'''] UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase = (images * 255).round().astype('''uint8''' ) UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) ) @torch.no_grad() def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler , lowercase ) self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase = (sample / 255) * 2 - 1 UpperCAmelCase = torch.Tensor(lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase = self.scheduler.alphas_cumprod[t] UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase = 1 - alpha_prod_t UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ): '''simple docstring''' UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
34
0
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __lowerCamelCase : Tuple = logging.get_logger(__name__) class A__ ( __snake_case ): _UpperCAmelCase :Tuple = ['pixel_values'] def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : Optional[Any] = size if size is not None else {"shortest_edge": 256} UpperCamelCase : List[Any] = get_size_dict(A_ , default_to_square=A_ ) UpperCamelCase : Dict = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCamelCase : List[Any] = get_size_dict(A_ , param_name="crop_size" ) UpperCamelCase : Optional[int] = do_resize UpperCamelCase : str = size UpperCamelCase : str = resample UpperCamelCase : Optional[int] = do_center_crop UpperCamelCase : List[Any] = crop_size UpperCamelCase : Optional[int] = do_rescale UpperCamelCase : Any = rescale_factor UpperCamelCase : Optional[Any] = do_normalize UpperCamelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD def __UpperCamelCase( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Dict = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) UpperCamelCase : Any = get_resize_output_image_size(A_ , size=size["shortest_edge"] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' UpperCamelCase : Dict = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" ) return center_crop(A_ , size=(size["height"], size["width"]) , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ , A_ = None , **A_ ): '''simple docstring''' return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ = None , **A_ , ): '''simple docstring''' return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ): '''simple docstring''' UpperCamelCase : str = do_resize if do_resize is not None else self.do_resize UpperCamelCase : Tuple = size if size is not None else self.size UpperCamelCase : Any = get_size_dict(A_ , default_to_square=A_ ) UpperCamelCase : Optional[Any] = resample if resample is not None else self.resample UpperCamelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase : str = crop_size if crop_size is not None else self.crop_size UpperCamelCase : List[Any] = get_size_dict(A_ , param_name="crop_size" ) UpperCamelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase : List[Any] = image_mean if image_mean is not None else self.image_mean UpperCamelCase : Optional[Any] = image_std if image_std is not None else self.image_std UpperCamelCase : List[Any] = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCamelCase : Tuple = [to_numpy_array(A_ ) for image in images] if do_resize: UpperCamelCase : Union[str, Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: UpperCamelCase : Optional[Any] = [self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: UpperCamelCase : int = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: UpperCamelCase : str = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] UpperCamelCase : List[str] = [to_channel_dimension_format(A_ , A_ ) for image in images] UpperCamelCase : List[Any] = {"pixel_values": images} return BatchFeature(data=A_ , tensor_type=A_ ) def __UpperCamelCase( self , A_ , A_ = None ): '''simple docstring''' UpperCamelCase : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(A_ ): UpperCamelCase : Tuple = target_sizes.numpy() UpperCamelCase : Tuple = [] for idx in range(len(A_ ) ): UpperCamelCase : List[str] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A_ ) UpperCamelCase : Union[str, Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: UpperCamelCase : Optional[Any] = logits.argmax(dim=1 ) UpperCamelCase : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
52
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal A =logging.get_logger(__name__) A =TypeVar('DatasetType', Dataset, IterableDataset) def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) else: return _interleave_iterable_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a ) else: return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
34
0
'''simple docstring''' def lowercase__ ( __lowercase : int ) -> list[int]: """simple docstring""" if num <= 0: raise ValueError('Input must be a positive integer' ) __UpperCamelCase = [True] * (num + 1) __UpperCamelCase = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , __lowercase ): __UpperCamelCase = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() a__ : str =int(input('''Enter a positive integer: ''').strip()) print(prime_sieve_eratosthenes(user_num))
53
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def snake_case_ (_a : Tuple ): return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def snake_case_ (): UpperCAmelCase = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a ) UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_a ) EnvironmentCommand.register_subcommand(_a ) TestCommand.register_subcommand(_a ) RunBeamCommand.register_subcommand(_a ) DummyDataCommand.register_subcommand(_a ) # Parse args UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if not hasattr(_a , '''func''' ): parser.print_help() exit(1 ) UpperCAmelCase = parse_unknown_args(_a ) # Run UpperCAmelCase = args.func(_a , **_a ) service.run() if __name__ == "__main__": main()
34
0
"""simple docstring""" import re from filelock import FileLock try: import nltk a__ : List[str] = True except (ImportError, ModuleNotFoundError): a__ : int = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' re.sub("<n>" , "" , lowerCAmelCase_ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(lowerCAmelCase_ ) )
54
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow A =[ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) A =logging.getLogger() def snake_case_ (): UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase = parser.parse_args() return args.f def snake_case_ (_a : List[str] , _a : Union[str, Any]="eval" ): UpperCAmelCase = os.path.join(_a , F"{split}_results.json" ) if os.path.exists(_a ): with open(_a , '''r''' ) as f: return json.load(_a ) raise ValueError(F"can't find {path}" ) A =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( __a ): def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_glue.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_clm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_summarization_flax.main() UpperCAmelCase = get_results(lowercase , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_ta_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_ner.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_qa.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
34
0
'''simple docstring''' def __snake_case ( UpperCAmelCase_ : int = 50 ): lowerCamelCase_ = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
55
'''simple docstring''' class _a : def __init__( self : Any ): '''simple docstring''' UpperCAmelCase = {} # Mapping from char to TrieNode UpperCAmelCase = False def A ( self : int , lowercase : list[str] ): '''simple docstring''' for word in words: self.insert(lowercase ) def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: UpperCAmelCase = TrieNode() UpperCAmelCase = curr.nodes[char] UpperCAmelCase = True def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: return False UpperCAmelCase = curr.nodes[char] return curr.is_leaf def A ( self : str , lowercase : str ): '''simple docstring''' def _delete(lowercase : TrieNode , lowercase : str , lowercase : int ) -> bool: if index == len(lowercase ): # If word does not exist if not curr.is_leaf: return False UpperCAmelCase = False return len(curr.nodes ) == 0 UpperCAmelCase = word[index] UpperCAmelCase = curr.nodes.get(lowercase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCAmelCase = _delete(lowercase , lowercase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowercase , 0 ) def snake_case_ (_a : TrieNode , _a : str ): if node.is_leaf: print(_a , end=''' ''' ) for key, value in node.nodes.items(): print_words(_a , word + key ) def snake_case_ (): UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split() UpperCAmelCase = TrieNode() root.insert_many(_a ) # print_words(root, "") assert all(root.find(_a ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def snake_case_ (_a : str , _a : bool ): print(str(_a ) , '''works!''' if passes else '''doesn\'t work :(''' ) def snake_case_ (): assert test_trie() def snake_case_ (): print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
34
0
'''simple docstring''' import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class a ( _lowerCamelCase ): def A_ ( self : Optional[int] ): snake_case_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowercase_ , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(lowercase_ , '''num_attention_heads''' ) ) self.parent.assertTrue(hasattr(lowercase_ , '''num_encoder_blocks''' ) ) class a : def __init__( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=64 , lowercase_ : Any=3 , lowercase_ : Optional[Any]=4 , lowercase_ : Dict=[2, 2, 2, 2] , lowercase_ : int=[8, 4, 2, 1] , lowercase_ : str=[16, 32, 64, 128] , lowercase_ : Optional[Any]=[1, 4, 8, 16] , lowercase_ : Any=[1, 2, 4, 8] , lowercase_ : Optional[int]=True , lowercase_ : str=True , lowercase_ : List[str]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Any=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : Union[str, Any]=None , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = num_channels snake_case_ = num_encoder_blocks snake_case_ = sr_ratios snake_case_ = depths snake_case_ = hidden_sizes snake_case_ = downsampling_rates snake_case_ = num_attention_heads snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = scope def A_ ( self : str ): snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels def A_ ( self : int ): return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def A_ ( self : Any , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Any ): snake_case_ = SegformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ ) snake_case_ = snake_case_ = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def A_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Any ): snake_case_ = self.num_labels snake_case_ = SegformerForSemanticSegmentation(lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = model(lowercase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) snake_case_ = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def A_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str ): snake_case_ = 1 snake_case_ = SegformerForSemanticSegmentation(config=lowercase_ ) model.to(lowercase_ ) model.eval() snake_case_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowercase_ ) snake_case_ = model(lowercase_ , labels=lowercase_ ) self.parent.assertGreater(result.loss , 0.0 ) def A_ ( self : List[str] ): snake_case_ = self.prepare_config_and_inputs() snake_case_ ,snake_case_ ,snake_case_ = config_and_inputs snake_case_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) snake_case_ = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) snake_case_ = True snake_case_ = False snake_case_ = False snake_case_ = False def A_ ( self : Any ): snake_case_ = SegformerModelTester(self ) snake_case_ = SegformerConfigTester(self , config_class=lowercase_ ) def A_ ( self : Tuple ): self.config_tester.run_common_tests() def A_ ( self : int ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def A_ ( self : int ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase_ ) def A_ ( self : Any ): snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*lowercase_ ) @unittest.skip('''SegFormer does not use inputs_embeds''' ) def A_ ( self : str ): pass @unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' ) def A_ ( self : List[Any] ): pass def A_ ( self : Optional[Any] ): snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowercase_ ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_ ) def A_ ( self : str ): snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True for model_class in self.all_model_classes: snake_case_ = True snake_case_ = False snake_case_ = True snake_case_ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) snake_case_ = outputs.attentions snake_case_ = sum(self.model_tester.depths ) self.assertEqual(len(lowercase_ ) , lowercase_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ = True snake_case_ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) snake_case_ = outputs.attentions self.assertEqual(len(lowercase_ ) , lowercase_ ) # verify the first attentions (first block, first layer) snake_case_ = (self.model_tester.image_size // 4) ** 2 snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) snake_case_ = (self.model_tester.image_size // 32) ** 2 snake_case_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) snake_case_ = len(lowercase_ ) # Check attention is always last and order is fine snake_case_ = True snake_case_ = True snake_case_ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) snake_case_ = outputs.attentions self.assertEqual(len(lowercase_ ) , lowercase_ ) # verify the first attentions (first block, first layer) snake_case_ = (self.model_tester.image_size // 4) ** 2 snake_case_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def A_ ( self : str ): def check_hidden_states_output(lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[str] ): snake_case_ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) snake_case_ = outputs.hidden_states snake_case_ = self.model_tester.num_encoder_blocks self.assertEqual(len(lowercase_ ) , lowercase_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def A_ ( self : Union[str, Any] ): if not self.model_tester.is_training: return snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True for model_class in self.all_model_classes: if model_class in get_values(lowercase_ ): continue snake_case_ = model_class(lowercase_ ) model.to(lowercase_ ) model.train() snake_case_ = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) snake_case_ = model(**lowercase_ ).loss loss.backward() @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def A_ ( self : List[Any] ): pass @slow def A_ ( self : Dict ): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = SegformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __magic_name__ ( ) -> Optional[int]: '''simple docstring''' snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class a ( unittest.TestCase ): @slow def A_ ( self : List[Any] ): # only resize + normalize snake_case_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ ) snake_case_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to( lowercase_ ) snake_case_ = prepare_img() snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ) snake_case_ = encoded_inputs.pixel_values.to(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ ) snake_case_ = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , lowercase_ ) snake_case_ = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1e-4 ) ) @slow def A_ ( self : List[str] ): # only resize + normalize snake_case_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ ) snake_case_ = SegformerForSemanticSegmentation.from_pretrained( '''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(lowercase_ ) snake_case_ = prepare_img() snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ) snake_case_ = encoded_inputs.pixel_values.to(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ ) snake_case_ = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , lowercase_ ) snake_case_ = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase_ , atol=1e-1 ) ) @slow def A_ ( self : str ): # only resize + normalize snake_case_ = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ ) snake_case_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to( lowercase_ ) snake_case_ = prepare_img() snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ) snake_case_ = encoded_inputs.pixel_values.to(lowercase_ ) with torch.no_grad(): snake_case_ = model(lowercase_ ) snake_case_ = outputs.logits.detach().cpu() snake_case_ = image_processor.post_process_semantic_segmentation(outputs=lowercase_ , target_sizes=[(500, 300)] ) snake_case_ = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowercase_ ) snake_case_ = image_processor.post_process_semantic_segmentation(outputs=lowercase_ ) snake_case_ = torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , lowercase_ )
56
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) A ={ 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _a ( __a ): __a : Union[str, Any] = """encodec""" def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = target_bandwidths UpperCAmelCase = sampling_rate UpperCAmelCase = audio_channels UpperCAmelCase = normalize UpperCAmelCase = chunk_length_s UpperCAmelCase = overlap UpperCAmelCase = hidden_size UpperCAmelCase = num_filters UpperCAmelCase = num_residual_layers UpperCAmelCase = upsampling_ratios UpperCAmelCase = norm_type UpperCAmelCase = kernel_size UpperCAmelCase = last_kernel_size UpperCAmelCase = residual_kernel_size UpperCAmelCase = dilation_growth_rate UpperCAmelCase = use_causal_conv UpperCAmelCase = pad_mode UpperCAmelCase = compress UpperCAmelCase = num_lstm_layers UpperCAmelCase = trim_right_ratio UpperCAmelCase = codebook_size UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size UpperCAmelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**lowercase ) @property def A ( self : Dict ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def A ( self : Union[str, Any] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def A ( self : Optional[int] ): '''simple docstring''' return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
34
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Dict ="""dandelin/vilt-b32-finetuned-vqa""" __UpperCAmelCase : str =( """This is a tool that answers a question about an image. It takes an input named `image` which should be the """ """image containing the information, as well as a `question` which should be the question in English. It """ """returns a text that is the answer to the question.""" ) __UpperCAmelCase : List[str] ="""image_qa""" __UpperCAmelCase : Any =AutoProcessor __UpperCAmelCase : int =AutoModelForVisualQuestionAnswering __UpperCAmelCase : str =["""image""", """text"""] __UpperCAmelCase : Dict =["""text"""] def __init__( self , *__a , **__a ): requires_backends(self , ["vision"] ) super().__init__(*__a , **__a ) def snake_case ( self , __a , __a ): return self.pre_processor(__a , __a , return_tensors="pt" ) def snake_case ( self , __a ): with torch.no_grad(): return self.model(**__a ).logits def snake_case ( self , __a ): __lowerCAmelCase = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
57
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch A =logging.get_logger(__name__) class _a ( __a ): __a : str = ["""pixel_values"""] def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' super().__init__(**lowercase ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" ) return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase ) def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ): '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ): '''simple docstring''' UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(lowercase ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images] UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=lowercase , tensor_type=lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ): '''simple docstring''' UpperCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase ) != len(lowercase ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowercase ): UpperCAmelCase = target_sizes.numpy() UpperCAmelCase = [] for idx in range(len(lowercase ) ): UpperCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase ) UpperCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase ) else: UpperCAmelCase = logits.argmax(dim=1 ) UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
34
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class a_ ( unittest.TestCase ): '''simple docstring''' def __init__( self , A , A=7 , A=3 , A=10 , A=18 , A=30 , A=400 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=None , ) -> List[Any]: _SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18} _SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = num_channels _SCREAMING_SNAKE_CASE = num_frames _SCREAMING_SNAKE_CASE = image_size _SCREAMING_SNAKE_CASE = min_resolution _SCREAMING_SNAKE_CASE = max_resolution _SCREAMING_SNAKE_CASE = do_resize _SCREAMING_SNAKE_CASE = size _SCREAMING_SNAKE_CASE = do_normalize _SCREAMING_SNAKE_CASE = image_mean _SCREAMING_SNAKE_CASE = image_std _SCREAMING_SNAKE_CASE = crop_size def snake_case_( self ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class a_ ( snake_case_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase = VivitImageProcessor if is_vision_available() else None def snake_case_( self ) -> List[str]: _SCREAMING_SNAKE_CASE = VivitImageProcessingTester(self ) @property def snake_case_( self ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , """image_mean""" ) ) self.assertTrue(hasattr(A , """image_std""" ) ) self.assertTrue(hasattr(A , """do_normalize""" ) ) self.assertTrue(hasattr(A , """do_resize""" ) ) self.assertTrue(hasattr(A , """do_center_crop""" ) ) self.assertTrue(hasattr(A , """size""" ) ) def snake_case_( self ) -> int: _SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def snake_case_( self ) -> Optional[int]: # Initialize image_processing _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos _SCREAMING_SNAKE_CASE = prepare_video_inputs(self.image_processor_tester , equal_resolution=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _SCREAMING_SNAKE_CASE = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def snake_case_( self ) -> List[str]: # Initialize image_processing _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _SCREAMING_SNAKE_CASE = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _SCREAMING_SNAKE_CASE = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def snake_case_( self ) -> str: # Initialize image_processing _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _SCREAMING_SNAKE_CASE = prepare_video_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for video in video_inputs: self.assertIsInstance(A , A ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input _SCREAMING_SNAKE_CASE = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _SCREAMING_SNAKE_CASE = image_processing(A , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
58
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A =logging.getLogger(__name__) def snake_case_ (_a : Dict , _a : Union[str, Any] ): return (preds == labels).mean() @dataclass class _a : __a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class _a : __a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) __a : str = field(metadata={"""help""": """Should contain the data files for the task."""} ) __a : int = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def snake_case_ (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _a ) # Set seed set_seed(training_args.seed ) try: UpperCAmelCase = processors[data_args.task_name]() UpperCAmelCase = processor.get_labels() UpperCAmelCase = len(_a ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , ) # Get datasets UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_a : EvalPrediction ) -> Dict: UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_a , p.label_ids )} # Data collator UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCAmelCase = Trainer( model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(_a , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , _a , _a ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_a ) return results def snake_case_ (_a : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
34
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCamelCase = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
59
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a ( __a ): __a : int = ["""image_processor""", """tokenizer"""] __a : Union[str, Any] = """ChineseCLIPImageProcessor""" __a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowercase , lowercase ) UpperCAmelCase = self.image_processor def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase ) if images is not None: UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase ) def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*lowercase , **lowercase ) @property def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self : List[Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , ) return self.image_processor_class
34
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : str = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class snake_case_( a__ ): __UpperCamelCase = '''mobilenet_v1''' def __init__( self : str , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Optional[int]=2_2_4 , UpperCamelCase_ : List[Any]=1.0 , UpperCamelCase_ : List[Any]=8 , UpperCamelCase_ : str="relu6" , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=0.999 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[str]=0.001 , **UpperCamelCase_ : str , ): super().__init__(**UpperCamelCase_ ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) lowerCAmelCase : List[str] = num_channels lowerCAmelCase : str = image_size lowerCAmelCase : Any = depth_multiplier lowerCAmelCase : Tuple = min_depth lowerCAmelCase : List[str] = hidden_act lowerCAmelCase : Optional[int] = tf_padding lowerCAmelCase : int = classifier_dropout_prob lowerCAmelCase : Dict = initializer_range lowerCAmelCase : List[Any] = layer_norm_eps class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : int ): return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def lowerCamelCase__ ( self : str ): if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def lowerCamelCase__ ( self : Optional[int] ): return 1E-4
60
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging A =logging.get_logger(__name__) A ={ 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _a ( __a ): __a : List[Any] = """marian""" __a : Union[str, Any] = ["""past_key_values"""] __a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : List[Any] , lowercase : Union[str, Any]=58_101 , lowercase : Tuple=None , lowercase : str=1_024 , lowercase : Optional[int]=12 , lowercase : Optional[int]=4_096 , lowercase : int=16 , lowercase : List[Any]=12 , lowercase : int=4_096 , lowercase : Optional[int]=16 , lowercase : int=0.0 , lowercase : Tuple=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]="gelu" , lowercase : Tuple=1_024 , lowercase : str=0.1 , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.02 , lowercase : Union[str, Any]=58_100 , lowercase : List[str]=False , lowercase : str=58_100 , lowercase : Any=0 , lowercase : Optional[Any]=0 , lowercase : Tuple=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = vocab_size UpperCAmelCase = decoder_vocab_size or vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , ) class _a ( __a ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def A ( self : int ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def A ( self : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super().outputs else: UpperCAmelCase = super(lowercase , self ).outputs if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) # Generate decoder inputs UpperCAmelCase = seq_length if not self.use_past else 1 UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase = dict(**lowercase , **lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1] UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = decoder_seq_length + 3 UpperCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 ) UpperCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase = min(lowercase , lowercase ) UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), ) ) # TODO: test this. UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase , lowercase ): common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) ) return common_inputs def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase = seqlen + 2 UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = common_inputs['''attention_mask'''].dtype UpperCAmelCase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) UpperCAmelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase ) ] return common_inputs def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase ) UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) ) return common_inputs def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) else: UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) return common_inputs def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase ) else: UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_( lowercase , lowercase , lowercase , lowercase ) @property def A ( self : Any ): '''simple docstring''' return 1E-4
34
0
"""simple docstring""" from typing import List import numpy as np def __a ( __lowerCamelCase ): UpperCAmelCase_ : Dict = {key: len(__lowerCamelCase ) for key, value in gen_kwargs.items() if isinstance(__lowerCamelCase, __lowerCamelCase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) UpperCAmelCase_ : List[str] = max(lists_lengths.values(), default=0 ) return max(1, __lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Optional[int] = [] for group_idx in range(__lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break UpperCAmelCase_ : Optional[Any] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 UpperCAmelCase_ : List[Any] = range(__lowerCamelCase, start + num_shards_to_add ) shards_indices_per_group.append(__lowerCamelCase ) return shards_indices_per_group def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Any = _number_of_shards_in_gen_kwargs(__lowerCamelCase ) if num_shards == 1: return [dict(__lowerCamelCase )] else: UpperCAmelCase_ : Any = _distribute_shards(num_shards=__lowerCamelCase, max_num_jobs=__lowerCamelCase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowerCamelCase, __lowerCamelCase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowerCamelCase ) ) ] def __a ( __lowerCamelCase ): return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key], __lowerCamelCase ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __a ( __lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : Union[str, Any] = {len(__lowerCamelCase ) for value in gen_kwargs.values() if isinstance(__lowerCamelCase, __lowerCamelCase )} UpperCAmelCase_ : List[str] = {} for size in list_sizes: UpperCAmelCase_ : Tuple = list(range(__lowerCamelCase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes UpperCAmelCase_ : Optional[int] = dict(__lowerCamelCase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowerCamelCase, __lowerCamelCase ): UpperCAmelCase_ : str = [value[i] for i in indices_per_size[len(__lowerCamelCase )]] return shuffled_kwargs
61
'''simple docstring''' import os def snake_case_ (): UpperCAmelCase = os.path.join(os.path.dirname(_a ) , '''num.txt''' ) with open(_a ) as file_hand: return str(sum(int(_a ) for line in file_hand ) )[:1_0] if __name__ == "__main__": print(solution())
34
0
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance _A = 6_378_137.0 _A = 6_356_752.314_245 _A = 637_8137 def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ): __UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude __UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) ) __UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius __UpperCamelCase =haversine_distance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / EQUATORIAL_RADIUS # Intermediate P and Q values __UpperCamelCase =(b_lata + b_lata) / 2 __UpperCamelCase =(b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) __UpperCamelCase =(sin(SCREAMING_SNAKE_CASE__ ) ** 2) * (cos(SCREAMING_SNAKE_CASE__ ) ** 2) __UpperCamelCase =cos(sigma / 2 ) ** 2 __UpperCamelCase =(sigma - sin(SCREAMING_SNAKE_CASE__ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) __UpperCamelCase =(cos(SCREAMING_SNAKE_CASE__ ) ** 2) * (sin(SCREAMING_SNAKE_CASE__ ) ** 2) __UpperCamelCase =sin(sigma / 2 ) ** 2 __UpperCamelCase =(sigma + sin(SCREAMING_SNAKE_CASE__ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
62
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version A =logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') A ={ 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization A ={ 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } A =sorted(arg_to_scheduler.keys()) A ='{' + ', '.join(arg_to_scheduler_choices) + '}' class _a ( pl.LightningModule ): def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase ) UpperCAmelCase = 0 UpperCAmelCase = Path(self.hparams.output_dir ) UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: UpperCAmelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , ) else: UpperCAmelCase = config UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase , lowercase ): assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute" setattr(self.config , lowercase , getattr(self.hparams , lowercase ) ) if tokenizer is None: UpperCAmelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , ) else: UpperCAmelCase = tokenizer UpperCAmelCase = MODEL_MODES[mode] if model is None: UpperCAmelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , ) else: UpperCAmelCase = model def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler] UpperCAmelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.model UpperCAmelCase = ['''bias''', '''LayerNorm.weight'''] UpperCAmelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: UpperCAmelCase = Adafactor( lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase ) else: UpperCAmelCase = AdamW( lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) UpperCAmelCase = optimizer UpperCAmelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def A ( self : List[Any] , lowercase : int , lowercase : List[str] ): '''simple docstring''' return self.validation_step(lowercase , lowercase ) def A ( self : List[Any] , lowercase : Tuple ): '''simple docstring''' return self.validation_end(lowercase ) def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A ( self : List[str] , lowercase : Any ): '''simple docstring''' if stage == "test": UpperCAmelCase = len(self.test_dataloader().dataset ) else: UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase ) UpperCAmelCase = len(self.train_dataloader().dataset ) def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def A ( self : Union[str, Any] ): '''simple docstring''' return self.train_loader def A ( self : Optional[Any] ): '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : List[Any] ): '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : Any , lowercase : Union[str, Any] ): '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A ( self : List[str] , lowercase : Dict[str, Any] ): '''simple docstring''' UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' ) UpperCAmelCase = self.step_count self.model.save_pretrained(lowercase ) self.tokenizer.save_pretrained(lowercase ) @staticmethod def A ( lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class _a ( pl.Callback ): def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase ) class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler'''] UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase ) def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log results for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log and save results to file UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase , '''w''' ) as writer: for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def snake_case_ (_a : int , _a : Optional[Any] ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '''--output_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=_a , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , ) def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ): pl.seed_everything(args.seed ) # init model UpperCAmelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_a ) # add custom checkpoints if checkpoint_callback is None: UpperCAmelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_a ) if logging_callback is None: UpperCAmelCase = LoggingCallback() UpperCAmelCase = {} if args.fpaa: UpperCAmelCase = 1_6 if args.gpus > 1: UpperCAmelCase = '''auto''' UpperCAmelCase = '''ddp''' UpperCAmelCase = args.accumulate_grad_batches UpperCAmelCase = None UpperCAmelCase = '''auto''' UpperCAmelCase = pl.Trainer.from_argparse_args( _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , ) if args.do_train: trainer.fit(_a ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
34
0
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCAmelCase_ : List[Any] = get_logger(__name__) class __SCREAMING_SNAKE_CASE : """simple docstring""" __a ='dummy_data' __a ='datasets' __a =False def __init__( self : Dict , __a : str , __a : str , __a : Union[Version, str] , __a : Optional[str] = None , __a : bool = False , __a : bool = True , __a : Optional[List[Callable]] = None , ): _a = 0 _a = dataset_name _a = cache_dir _a = use_local_dummy_data _a = config # download_callbacks take a single url as input _a = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _a = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _a = str(__a ) # to be downloaded _a = None _a = None @property def UpperCamelCase__ ( self : Dict ): if self._dummy_file is None: _a = self.download_dummy_data() return self._dummy_file @property def UpperCamelCase__ ( self : Dict ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def UpperCamelCase__ ( self : Optional[int] ): return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def UpperCamelCase__ ( self : int ): _a = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _a = cached_path( __a , cache_dir=self.cache_dir , extract_compressed_file=__a , force_extract=__a ) return os.path.join(__a , self.dummy_file_name ) @property def UpperCamelCase__ ( self : Tuple ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def UpperCamelCase__ ( self : Union[str, Any] ): if self._bucket_url is None: _a = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def UpperCamelCase__ ( self : Optional[int] ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def UpperCamelCase__ ( self : Dict , __a : Tuple , *__a : Tuple ): if self.load_existing_dummy_data: # dummy data is downloaded and tested _a = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _a = self.dummy_file_name # special case when data_url is a dict if isinstance(__a , __a ): return self.create_dummy_data_dict(__a , __a ) elif isinstance(__a , (list, tuple) ): return self.create_dummy_data_list(__a , __a ) else: return self.create_dummy_data_single(__a , __a ) def UpperCamelCase__ ( self : Any , __a : int , *__a : int ): return self.download_and_extract(__a ) def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : int ): return self.download_and_extract(__a ) def UpperCamelCase__ ( self : Tuple , __a : List[Any] , *__a : Tuple , **__a : int ): return path def UpperCamelCase__ ( self : List[Any] ): return {} def UpperCamelCase__ ( self : str , __a : List[str] , __a : Union[str, Any] ): _a = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__a , __a ): for single_url in single_urls: download_callback(__a ) else: _a = single_urls download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__a , __a ): _a = [os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) for x in single_urls] else: _a = single_urls _a = os.path.join(__a , urllib.parse.quote_plus(Path(__a ).name ) ) _a = value # make sure that values are unique if all(isinstance(__a , __a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _a = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCamelCase__ ( self : Optional[Any] , __a : List[str] , __a : str ): _a = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _a = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __a ) ) for url in data_url ) _a = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _a = [data_url[0]] * len(__a ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a = os.path.join(__a , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__a ) return dummy_data_list def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[int] , __a : List[Any] ): for download_callback in self.download_callbacks: download_callback(__a ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _a = os.path.join(__a , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__a ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCamelCase__ ( self : Dict ): pass def UpperCamelCase__ ( self : Dict ): pass def UpperCamelCase__ ( self : Optional[int] , __a : Any ): def _iter_archive_members(__a : Tuple ): # this preserves the order of the members inside the ZIP archive _a = Path(self.dummy_file ).parent _a = path.relative_to(__a ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _a = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__a ) _a = Path(__a ) _a = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__a ).as_posix(), file_path.open("rb" ) def UpperCamelCase__ ( self : Tuple , __a : Optional[int] ): if not isinstance(__a , __a ): _a = [paths] for path in paths: if os.path.isfile(__a ): if os.path.basename(__a ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__a ): if os.path.basename(__a ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__a ): if filename.startswith((".", "__") ): continue yield os.path.join(__a , __a )
63
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue UpperCAmelCase = cst_fwd.get(_a , np.inf ) UpperCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) UpperCAmelCase = new_cost_f UpperCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ): UpperCAmelCase = -1 UpperCAmelCase = set() UpperCAmelCase = set() UpperCAmelCase = {source: 0} UpperCAmelCase = {destination: 0} UpperCAmelCase = {source: None} UpperCAmelCase = {destination: None} UpperCAmelCase = PriorityQueue() UpperCAmelCase = PriorityQueue() UpperCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): UpperCAmelCase , UpperCAmelCase = queue_forward.get() visited_forward.add(_a ) UpperCAmelCase , UpperCAmelCase = queue_backward.get() visited_backward.add(_a ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: UpperCAmelCase = shortest_distance return shortest_path_distance A ={ 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } A ={ 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
34
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('''>=''', '''0.0.12''') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class lowercase( __a ): '''simple docstring''' lowercase__ = 42 lowercase__ = 42 from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
64
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) def snake_case_ (_a : List[str] ): UpperCAmelCase = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase = 1_2_8 elif "12-12" in model_name: UpperCAmelCase = 1_2 UpperCAmelCase = 1_2 elif "14-14" in model_name: UpperCAmelCase = 1_4 UpperCAmelCase = 1_4 elif "16-16" in model_name: UpperCAmelCase = 1_6 UpperCAmelCase = 1_6 else: raise ValueError('''Model not supported''' ) UpperCAmelCase = '''huggingface/label-files''' if "speech-commands" in model_name: UpperCAmelCase = 3_5 UpperCAmelCase = '''speech-commands-v2-id2label.json''' else: UpperCAmelCase = 5_2_7 UpperCAmelCase = '''audioset-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (_a : Tuple ): if "module.v" in name: UpperCAmelCase = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: UpperCAmelCase = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: UpperCAmelCase = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def snake_case_ (_a : Dict , _a : List[Any] ): for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(_a ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[3] ) UpperCAmelCase = config.hidden_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[dim : dim * 2, :] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def snake_case_ (_a : Tuple ): UpperCAmelCase = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(_a , _a ) @torch.no_grad() def snake_case_ (_a : int , _a : Union[str, Any] , _a : Dict=False ): UpperCAmelCase = get_audio_spectrogram_transformer_config(_a ) UpperCAmelCase = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict UpperCAmelCase = model_name_to_url[model_name] UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' ) # remove some keys remove_keys(_a ) # rename some keys UpperCAmelCase = convert_state_dict(_a , _a ) # load 🤗 model UpperCAmelCase = ASTForAudioClassification(_a ) model.eval() model.load_state_dict(_a ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 UpperCAmelCase = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 UpperCAmelCase = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8 UpperCAmelCase = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a ) if "speech-commands" in model_name: UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) UpperCAmelCase = dataset[0]['''audio''']['''array'''] else: UpperCAmelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) UpperCAmelCase , UpperCAmelCase = torchaudio.load(_a ) UpperCAmelCase = waveform.squeeze().numpy() UpperCAmelCase = feature_extractor(_a , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' ) # forward pass UpperCAmelCase = model(**_a ) UpperCAmelCase = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_a ).mkdir(exist_ok=_a ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_a ) print(F"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_a ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F"MIT/{model_name}" ) feature_extractor.push_to_hub(F"MIT/{model_name}" ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
34
0
def lowerCAmelCase_ ( __A ) -> str: '''simple docstring''' UpperCAmelCase__ = "" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def lowerCAmelCase_ ( __A ) -> dict[str, str]: '''simple docstring''' UpperCAmelCase__ = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key UpperCAmelCase__ = remove_duplicates(key.upper() ) UpperCAmelCase__ = len(__A ) # First fill cipher with key characters UpperCAmelCase__ = {alphabet[i]: char for i, char in enumerate(__A )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(__A ), 26 ): UpperCAmelCase__ = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 UpperCAmelCase__ = alphabet[i - offset] UpperCAmelCase__ = char return cipher_alphabet def lowerCAmelCase_ ( __A, __A ) -> str: '''simple docstring''' return "".join(cipher_map.get(__A, __A ) for ch in message.upper() ) def lowerCAmelCase_ ( __A, __A ) -> str: '''simple docstring''' UpperCAmelCase__ = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(__A, __A ) for ch in message.upper() ) def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase__ = input("Enter message to encode or decode: " ).strip() UpperCAmelCase__ = input("Enter keyword: " ).strip() UpperCAmelCase__ = input("Encipher or decipher? E/D:" ).strip()[0].lower() try: UpperCAmelCase__ = {"e": encipher, "d": decipher}[option] except KeyError: raise KeyError("invalid input option" ) UpperCAmelCase__ = create_cipher_map(__A ) print(func(__A, __A ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
65
'''simple docstring''' from __future__ import annotations def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : List[str] ): # noqa: E741 while r - l > 1: UpperCAmelCase = (l + r) // 2 if v[m] >= key: UpperCAmelCase = m else: UpperCAmelCase = m # noqa: E741 return r def snake_case_ (_a : list[int] ): if len(_a ) == 0: return 0 UpperCAmelCase = [0] * len(_a ) UpperCAmelCase = 1 UpperCAmelCase = v[0] for i in range(1 , len(_a ) ): if v[i] < tail[0]: UpperCAmelCase = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase = v[i] length += 1 else: UpperCAmelCase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
34
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __a = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure)
66
'''simple docstring''' def snake_case_ (_a : str , _a : str ): UpperCAmelCase = len(_a ) + 1 UpperCAmelCase = len(_a ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )] # since string of zero length match pattern of zero length UpperCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _a ): UpperCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _a ): UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _a ): for j in range(1 , _a ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": UpperCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: UpperCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): UpperCAmelCase = dp[i - 1][j] else: UpperCAmelCase = 0 else: UpperCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") A ='aab' A ='c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
34
0
'''simple docstring''' def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: if b == 0: return 1 if (b % 2) == 0: return actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) ) else: return a * actual_power(UpperCamelCase__ , int(b / 2 ) ) * actual_power(UpperCamelCase__ , int(b / 2 ) ) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> float: if b < 0: return 1 / actual_power(UpperCamelCase__ , UpperCamelCase__ ) return actual_power(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": print(power(-2, -3))
67
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A ='pt' elif is_tf_available(): A ='tf' else: A ='jax' class _a ( __a , unittest.TestCase ): __a : Optional[Any] = PerceiverTokenizer __a : str = False def A ( self : Union[str, Any] ): '''simple docstring''' super().setUp() UpperCAmelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : Optional[int] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self : Union[str, Any] , **lowercase : int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ): '''simple docstring''' UpperCAmelCase = [] for i in range(len(lowercase ) ): try: UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) ) UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: UpperCAmelCase = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase = [t[0] for t in toks] # Ensure consistency UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: UpperCAmelCase = ''' ''' + output_txt UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = '''Unicode €.''' UpperCAmelCase = tokenizer(lowercase ) UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' ) UpperCAmelCase = tokenizer('''e è é ê ë''' ) UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) if FRAMEWORK != "jax": UpperCAmelCase = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , lowercase ) self.assertIn('''attention_mask''' , lowercase ) self.assertNotIn('''decoder_input_ids''' , lowercase ) self.assertNotIn('''decoder_attention_mask''' , lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] UpperCAmelCase = tokenizer( text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) UpperCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) UpperCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )] UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def A ( self : Union[str, Any] ): '''simple docstring''' pass def A ( self : Any ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' pass def A ( self : str ): '''simple docstring''' pass def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(lowercase , lowercase )
34
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str=None ) -> Tuple: '''simple docstring''' if subparsers is not None: A__ = subparsers.add_parser("test" ) else: A__ = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=SCREAMING_SNAKE_CASE_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) return parser def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[Any] ) -> List[str]: '''simple docstring''' A__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: A__ = script_name else: A__ = F'--config_file={args.config_file} {script_name}' A__ = ["accelerate-launch"] + test_args.split() A__ = execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def lowerCAmelCase__ ( ) -> List[str]: '''simple docstring''' A__ = test_command_parser() A__ = parser.parse_args() test_command(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
68
'''simple docstring''' import os from distutils.util import strtobool def snake_case_ (_a : Union[str, Any] , _a : List[Any] ): for e in env_keys: UpperCAmelCase = int(os.environ.get(_a , -1 ) ) if val >= 0: return val return default def snake_case_ (_a : Dict , _a : Any=False ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return strtobool(_a ) == 1 # As its name indicates `strtobool` actually returns an int... def snake_case_ (_a : str , _a : Optional[Any]="no" ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return value
34
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __UpperCamelCase = {'''tokenization_bertweet''': ['''BertweetTokenizer''']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
69
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A =logging.getLogger(__name__) A ='Hello world! cécé herlolip' A =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def snake_case_ (_a : List[Any] , _a : Any ): UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) UpperCAmelCase = torch.load(_a , lambda _a , _a : storage ) UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a ) original.eval() UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase = encoder_input_ids UpperCAmelCase = decoder_input_ids UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0] UpperCAmelCase = original.generator(_a ) UpperCAmelCase = new_model( _a , _a , _a , _a , _a )[0] UpperCAmelCase = new_model.generator(_a ) UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) A =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
34
0
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
70
'''simple docstring''' from ..utils import DummyObject, requires_backends class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : Any = ["""flax""", """transformers"""] def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] )
34
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __A ( a , a , unittest.TestCase ): """simple docstring""" UpperCamelCase__ : Tuple =StableDiffusionPanoramaPipeline UpperCamelCase__ : Union[str, Any] =TEXT_TO_IMAGE_PARAMS UpperCamelCase__ : Union[str, Any] =TEXT_TO_IMAGE_BATCH_PARAMS UpperCamelCase__ : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS UpperCamelCase__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS def __lowercase ( self ): """simple docstring""" torch.manual_seed(0 ) __UpperCamelCase : str =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) __UpperCamelCase : List[Any] =DDIMScheduler() torch.manual_seed(0 ) __UpperCamelCase : str =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) __UpperCamelCase : List[str] =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __UpperCamelCase : Any =CLIPTextModel(lowerCamelCase__ ) __UpperCamelCase : List[Any] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __UpperCamelCase : int ={ 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : Dict =torch.manual_seed(lowerCamelCase__ ) __UpperCamelCase : Optional[int] ={ 'prompt': 'a photo of the dolomites', 'generator': generator, # Setting height and width to None to prevent OOMs on CPU. 'height': None, 'width': None, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase : Any =self.get_dummy_components() __UpperCamelCase : Tuple =StableDiffusionPanoramaPipeline(**lowerCamelCase__ ) __UpperCamelCase : List[str] =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : Any =sd_pipe(**lowerCamelCase__ ).images __UpperCamelCase : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase : Tuple =np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowercase ( self ): """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : str ='cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase : Optional[Any] =self.get_dummy_components() __UpperCamelCase : Union[str, Any] =StableDiffusionPanoramaPipeline(**lowerCamelCase__ ) __UpperCamelCase : List[str] =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : str =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : int ='french fries' __UpperCamelCase : Optional[Any] =sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ ) __UpperCamelCase : Dict =output.images __UpperCamelCase : Any =image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase : List[Any] =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase : Optional[Any] =self.get_dummy_components() __UpperCamelCase : Dict =StableDiffusionPanoramaPipeline(**lowerCamelCase__ ) __UpperCamelCase : str =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : str =sd_pipe(**lowerCamelCase__ , view_batch_size=2 ) __UpperCamelCase : Any =output.images __UpperCamelCase : List[str] =image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase : List[str] =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase : Optional[int] =self.get_dummy_components() __UpperCamelCase : List[str] =EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' ) __UpperCamelCase : Any =StableDiffusionPanoramaPipeline(**lowerCamelCase__ ) __UpperCamelCase : int =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[str] =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : List[str] =sd_pipe(**lowerCamelCase__ ).images __UpperCamelCase : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase : Optional[int] =np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase : int =self.get_dummy_components() __UpperCamelCase : Tuple =PNDMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCamelCase__ ) __UpperCamelCase : Dict =StableDiffusionPanoramaPipeline(**lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __UpperCamelCase : List[Any] =self.get_dummy_inputs(lowerCamelCase__ ) __UpperCamelCase : List[str] =sd_pipe(**lowerCamelCase__ ).images __UpperCamelCase : Dict =image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase : Optional[Any] =np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self , lowerCamelCase__=0 ): """simple docstring""" __UpperCamelCase : Tuple =torch.manual_seed(lowerCamelCase__ ) __UpperCamelCase : Dict ={ 'prompt': 'a photo of the dolomites', 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Optional[int] ='stabilityai/stable-diffusion-2-base' __UpperCamelCase : Union[str, Any] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' ) __UpperCamelCase : Optional[Any] =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() __UpperCamelCase : Optional[int] =self.get_inputs() __UpperCamelCase : str =pipe(**lowerCamelCase__ ).images __UpperCamelCase : str =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __UpperCamelCase : Dict =np.array( [ 0.36_968_392, 0.27_025_372, 0.32_446_766, 0.28_379_387, 0.36_363_274, 0.30_733_347, 0.27_100_027, 0.27_054_125, 0.25_536_096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Any =StableDiffusionPanoramaPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCamelCase__ ) __UpperCamelCase : List[Any] =LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() __UpperCamelCase : Tuple =self.get_inputs() __UpperCamelCase : List[Any] =pipe(**lowerCamelCase__ ).images __UpperCamelCase : str =image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __UpperCamelCase : Dict =np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __lowercase ( self ): """simple docstring""" __UpperCamelCase : Dict =0 def callback_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None: __UpperCamelCase : List[Any] =True nonlocal number_of_steps number_of_steps += 1 if step == 1: __UpperCamelCase : Optional[Any] =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __UpperCamelCase : Tuple =latents[0, -3:, -3:, -1] __UpperCamelCase : Optional[Any] =np.array( [ 0.18_681_869, 0.33_907_816, 0.5_361_276, 0.14_432_865, -0.02_856_611, -0.73_941_123, 0.23_397_987, 0.47_322_682, -0.37_823_164, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __UpperCamelCase : List[Any] =latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __UpperCamelCase : Dict =latents[0, -3:, -3:, -1] __UpperCamelCase : int =np.array( [ 0.18_539_645, 0.33_987_248, 0.5_378_559, 0.14_437_142, -0.02_455_261, -0.7_338_317, 0.23_990_755, 0.47_356_272, -0.3_786_505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __UpperCamelCase : Any =False __UpperCamelCase : int ='stabilityai/stable-diffusion-2-base' __UpperCamelCase : Optional[int] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' ) __UpperCamelCase : int =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing() __UpperCamelCase : str =self.get_inputs() pipe(**lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowercase ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __UpperCamelCase : Optional[int] ='stabilityai/stable-diffusion-2-base' __UpperCamelCase : Optional[Any] =DDIMScheduler.from_pretrained(lowerCamelCase__ , subfolder='scheduler' ) __UpperCamelCase : str =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase__ , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ ) __UpperCamelCase : List[Any] =pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __UpperCamelCase : Optional[Any] =self.get_inputs() __UpperCamelCase : List[str] =pipe(**lowerCamelCase__ ) __UpperCamelCase : Optional[int] =torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
71
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) A ={ 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } A ={ 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def snake_case_ (_a : List[str] ): UpperCAmelCase = EfficientNetConfig() UpperCAmelCase = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCAmelCase = CONFIG_MAP[model_name]['''width_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''depth_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dw_padding'''] UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = 1_0_0_0 UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(_a , stream=_a ).raw ) return im def snake_case_ (_a : str ): UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_a , ) return preprocessor def snake_case_ (_a : Optional[Any] ): UpperCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase = sorted(set(_a ) ) UpperCAmelCase = len(_a ) UpperCAmelCase = {b: str(_a ) for b, i in zip(_a , range(_a ) )} UpperCAmelCase = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase = '''efficientnet.''' + item[1] UpperCAmelCase = '''classifier.weight''' UpperCAmelCase = '''classifier.bias''' return key_mapping def snake_case_ (_a : Dict , _a : List[str] , _a : Dict ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase = torch.from_numpy(np.transpose(_a ) ) else: UpperCAmelCase = torch.from_numpy(_a ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_a ) @torch.no_grad() def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ): UpperCAmelCase = model_classes[model_name]( include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , ) UpperCAmelCase = original_model.trainable_variables UpperCAmelCase = original_model.non_trainable_variables UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase = param.numpy() UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase = get_efficientnet_config(_a ) UpperCAmelCase = EfficientNetForImageClassification(_a ).eval() UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase = rename_keys(_a ) replace_params(_a , _a , _a ) # Initialize preprocessor and preprocess input image UpperCAmelCase = convert_image_processor(_a ) UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase = hf_model(**_a ) UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase = False UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase = image.img_to_array(_a ) UpperCAmelCase = np.expand_dims(_a , axis=0 ) UpperCAmelCase = original_model.predict(_a ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_a ): os.mkdir(_a ) # Save converted model and image processor hf_model.save_pretrained(_a ) preprocessor.save_pretrained(_a ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." ) UpperCAmelCase = F"efficientnet-{model_name}" preprocessor.push_to_hub(_a ) hf_model.push_to_hub(_a ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') A =parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
34
0
"""simple docstring""" lowerCAmelCase__ = '''Alexander Joslin''' import operator as op from .stack import Stack def snake_case_ ( A_ : str ): '''simple docstring''' _lowerCamelCase : Tuple = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} _lowerCamelCase : Stack[int] = Stack() _lowerCamelCase : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(A_ ) ) elif i in operators: # RULE 2 operator_stack.push(A_ ) elif i == ")": # RULE 4 _lowerCamelCase : int = operator_stack.peek() operator_stack.pop() _lowerCamelCase : Dict = operand_stack.peek() operand_stack.pop() _lowerCamelCase : Any = operand_stack.peek() operand_stack.pop() _lowerCamelCase : Optional[int] = operators[opr](A_, A_ ) operand_stack.push(A_ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowerCAmelCase__ = '''(5 + ((4 * 2) * (2 + 3)))''' # answer = 45 print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
72
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A =input('Enter image url: ').strip() print(f"""Downloading image from {url} ...""") A =BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image A =soup.find('meta', {'property': 'og:image'})['content'] A =requests.get(image_url).content A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(f"""Done. Image saved to disk as {file_name}.""")
34
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) a ={ """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""LayoutLMv2FeatureExtractor"""] a =["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =[ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
73
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _a ( __a ): __a : str = ["""vqvae"""] def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ): '''simple docstring''' super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' return 50 if isinstance(self.scheduler , lowercase ) else 1_000 @torch.no_grad() def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ): '''simple docstring''' UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase , device=self.device , ) UpperCAmelCase = noise UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase , lowercase ) UpperCAmelCase = self.mel.audio_slice_to_image(lowercase ) UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase = (input_image / 255) * 2 - 1 UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample( generator=lowercase )[0] UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase = int(mask_start_secs * pixels_per_second ) UpperCAmelCase = int(mask_end_secs * pixels_per_second ) UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase ): UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample'''] else: UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] if isinstance(self.scheduler , lowercase ): UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample'''] else: UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase = self.vqvae.decode(lowercase )['''sample'''] UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase = (images * 255).round().astype('''uint8''' ) UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) ) @torch.no_grad() def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler , lowercase ) self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase = (sample / 255) * 2 - 1 UpperCAmelCase = torch.Tensor(lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase = self.scheduler.alphas_cumprod[t] UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase = 1 - alpha_prod_t UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ): '''simple docstring''' UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
34
0
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[Any]=13 ,A_ : List[Any]=30 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=3 ,A_ : int=True ,A_ : Optional[Any]=True ,A_ : List[str]=32 ,A_ : str=2 ,A_ : str=4 ,A_ : int=37 ,A_ : Tuple="gelu" ,A_ : Any=0.1 ,A_ : int=0.1 ,A_ : str=10 ,A_ : List[str]=0.02 ,A_ : int=3 ,A_ : List[Any]=None ,A_ : int=2 ,) -> Dict: A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope A = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A = (image_size // patch_size) ** 2 A = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Any ,A_ : Any ,A_ : int ) -> int: A = TFDeiTModel(config=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = TFDeiTForMaskedImageModeling(config=A_ ) A = model(A_ ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A = 1 A = TFDeiTForMaskedImageModeling(A_ ) A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(A_ ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Tuple ) -> List[Any]: A = self.type_sequence_label_size A = TFDeiTForImageClassification(A_ ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A = 1 A = TFDeiTForImageClassification(A_ ) A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _lowerCamelCase: Dict = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: Dict = False _lowerCamelCase: Any = False _lowerCamelCase: Dict = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = TFDeiTModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ ,tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[str] ,A_ : Any ,A_ : List[Any]=False ) -> str: A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFDeiTModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='tf' ) # forward pass A = model(**A_ ) # verify the logits A = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape ,A_ ) A = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
74
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal A =logging.get_logger(__name__) A =TypeVar('DatasetType', Dataset, IterableDataset) def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) else: return _interleave_iterable_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a ) else: return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
34
0
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin class __UpperCamelCase ( unittest.TestCase , lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_tool('''text-classification''' ) self.tool.setup() lowerCamelCase_ =load_tool('''text-classification''', remote=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tool('''That\'s quite cool''', ['''positive''', '''negative'''] ) self.assertEqual(lowerCAmelCase, '''positive''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.remote_tool('''That\'s quite cool''', ['''positive''', '''negative'''] ) self.assertEqual(lowerCAmelCase, '''positive''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tool(text='''That\'s quite cool''', labels=['''positive''', '''negative'''] ) self.assertEqual(lowerCAmelCase, '''positive''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.remote_tool(text='''That\'s quite cool''', labels=['''positive''', '''negative'''] ) self.assertEqual(lowerCAmelCase, '''positive''' )
75
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def snake_case_ (_a : Tuple ): return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def snake_case_ (): UpperCAmelCase = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a ) UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_a ) EnvironmentCommand.register_subcommand(_a ) TestCommand.register_subcommand(_a ) RunBeamCommand.register_subcommand(_a ) DummyDataCommand.register_subcommand(_a ) # Parse args UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if not hasattr(_a , '''func''' ): parser.print_help() exit(1 ) UpperCAmelCase = parse_unknown_args(_a ) # Run UpperCAmelCase = args.func(_a , **_a ) service.run() if __name__ == "__main__": main()
34
0
import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) a_ = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ ='conditional_detr' lowerCamelCase__ =['past_key_values'] lowerCamelCase__ ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : List[Any] , a : List[str]=True , a : int=None , a : Optional[int]=3 , a : List[str]=300 , a : Optional[Any]=6 , a : Tuple=2048 , a : Dict=8 , a : Optional[Any]=6 , a : Tuple=2048 , a : List[Any]=8 , a : Tuple=0.0 , a : Optional[Any]=0.0 , a : Tuple=True , a : List[str]="relu" , a : List[Any]=256 , a : str=0.1 , a : Optional[Any]=0.0 , a : Optional[int]=0.0 , a : Union[str, Any]=0.02 , a : Tuple=1.0 , a : Dict=False , a : Optional[Any]="sine" , a : Optional[int]="resnet50" , a : Dict=True , a : str=False , a : int=2 , a : List[str]=5 , a : str=2 , a : Optional[int]=1 , a : Tuple=1 , a : Optional[Any]=2 , a : Optional[int]=5 , a : List[str]=2 , a : Any=0.25 , **a : Tuple , ) -> Dict: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(a , a ): SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get("model_type" ) SCREAMING_SNAKE_CASE : List[str] = CONFIG_MAPPING[backbone_model_type] SCREAMING_SNAKE_CASE : List[Any] = config_class.from_dict(a ) SCREAMING_SNAKE_CASE : Union[str, Any] = use_timm_backbone SCREAMING_SNAKE_CASE : Dict = backbone_config SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : str = num_queries SCREAMING_SNAKE_CASE : List[Any] = d_model SCREAMING_SNAKE_CASE : Tuple = encoder_ffn_dim SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_layers SCREAMING_SNAKE_CASE : Tuple = encoder_attention_heads SCREAMING_SNAKE_CASE : int = decoder_ffn_dim SCREAMING_SNAKE_CASE : Dict = decoder_layers SCREAMING_SNAKE_CASE : Dict = decoder_attention_heads SCREAMING_SNAKE_CASE : str = dropout SCREAMING_SNAKE_CASE : Any = attention_dropout SCREAMING_SNAKE_CASE : List[Any] = activation_dropout SCREAMING_SNAKE_CASE : List[Any] = activation_function SCREAMING_SNAKE_CASE : Optional[int] = init_std SCREAMING_SNAKE_CASE : Optional[int] = init_xavier_std SCREAMING_SNAKE_CASE : Dict = encoder_layerdrop SCREAMING_SNAKE_CASE : str = decoder_layerdrop SCREAMING_SNAKE_CASE : Any = encoder_layers SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss SCREAMING_SNAKE_CASE : Dict = position_embedding_type SCREAMING_SNAKE_CASE : Dict = backbone SCREAMING_SNAKE_CASE : Any = use_pretrained_backbone SCREAMING_SNAKE_CASE : str = dilation # Hungarian matcher SCREAMING_SNAKE_CASE : int = class_cost SCREAMING_SNAKE_CASE : Optional[int] = bbox_cost SCREAMING_SNAKE_CASE : Optional[Any] = giou_cost # Loss coefficients SCREAMING_SNAKE_CASE : List[Any] = mask_loss_coefficient SCREAMING_SNAKE_CASE : Optional[int] = dice_loss_coefficient SCREAMING_SNAKE_CASE : Union[str, Any] = cls_loss_coefficient SCREAMING_SNAKE_CASE : Dict = bbox_loss_coefficient SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient SCREAMING_SNAKE_CASE : List[Any] = focal_alpha super().__init__(is_encoder_decoder=a , **a ) @property def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" return self.encoder_attention_heads @property def __UpperCamelCase ( self : str ) -> int: """simple docstring""" return self.d_model def __UpperCamelCase ( self : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict() SCREAMING_SNAKE_CASE : str = self.__class__.model_type return output class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ =version.parse('1.11' ) @property def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def __UpperCamelCase ( self : List[Any] ) -> float: """simple docstring""" return 1e-5 @property def __UpperCamelCase ( self : Optional[int] ) -> int: """simple docstring""" return 12
76
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow A =[ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) A =logging.getLogger() def snake_case_ (): UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase = parser.parse_args() return args.f def snake_case_ (_a : List[str] , _a : Union[str, Any]="eval" ): UpperCAmelCase = os.path.join(_a , F"{split}_results.json" ) if os.path.exists(_a ): with open(_a , '''r''' ) as f: return json.load(_a ) raise ValueError(F"can't find {path}" ) A =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( __a ): def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_glue.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_clm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_summarization_flax.main() UpperCAmelCase = get_results(lowercase , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_ta_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_ner.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_qa.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
34
0
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : int = logging.get_logger(__name__) _UpperCamelCase : str = [ ("bert.bert", "visual_bert"), ("bert.cls", "cls"), ("bert.classifier", "cls"), ("token_type_embeddings_visual", "visual_token_type_embeddings"), ("position_embeddings_visual", "visual_position_embeddings"), ("projection", "visual_projection"), ] _UpperCamelCase : Dict = [ "nlvr2_coco_pre_trained.th", "nlvr2_fine_tuned.th", "nlvr2_pre_trained.th", "vcr_coco_pre_train.th", "vcr_fine_tune.th", "vcr_pre_train.th", "vqa_coco_pre_trained.th", "vqa_fine_tuned.th", "vqa_pre_trained.th", ] def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Dict = torch.load(_lowerCAmelCase , map_location='cpu' ) return sd def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=rename_keys_prefix ): '''simple docstring''' lowercase__ : Union[str, Any] = OrderedDict() lowercase__ : List[str] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue lowercase__ : List[str] = key for name_pair in rename_keys_prefix: lowercase__ : List[Any] = new_key.replace(name_pair[0] , name_pair[1] ) lowercase__ : List[str] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately lowercase__ : str = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : str ): '''simple docstring''' assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: lowercase__ : List[Any] = 'pretraining' if "vcr" in checkpoint_path: lowercase__ : int = {'visual_embedding_dim': 512} elif "vqa_advanced" in checkpoint_path: lowercase__ : Tuple = {'visual_embedding_dim': 2048} elif "vqa" in checkpoint_path: lowercase__ : Union[str, Any] = {'visual_embedding_dim': 2048} elif "nlvr" in checkpoint_path: lowercase__ : List[str] = {'visual_embedding_dim': 1024} else: raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: lowercase__ : Optional[Any] = {'visual_embedding_dim': 512} lowercase__ : int = 'multichoice' elif "vqa_advanced" in checkpoint_path: lowercase__ : int = {'visual_embedding_dim': 2048} lowercase__ : Tuple = 'vqa_advanced' elif "vqa" in checkpoint_path: lowercase__ : Union[str, Any] = {'visual_embedding_dim': 2048, 'num_labels': 3129} lowercase__ : int = 'vqa' elif "nlvr" in checkpoint_path: lowercase__ : Tuple = { 'visual_embedding_dim': 1024, 'num_labels': 2, } lowercase__ : List[str] = 'nlvr' lowercase__ : Optional[Any] = VisualBertConfig(**_lowerCAmelCase ) # Load State Dict lowercase__ : List[Any] = load_state_dict(_lowerCAmelCase ) lowercase__ : str = get_new_dict(_lowerCAmelCase , _lowerCAmelCase ) if model_type == "pretraining": lowercase__ : Optional[Any] = VisualBertForPreTraining(_lowerCAmelCase ) elif model_type == "vqa": lowercase__ : Optional[Any] = VisualBertForQuestionAnswering(_lowerCAmelCase ) elif model_type == "nlvr": lowercase__ : Dict = VisualBertForVisualReasoning(_lowerCAmelCase ) elif model_type == "multichoice": lowercase__ : List[Any] = VisualBertForMultipleChoice(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) # Save Checkpoints Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.") parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.") _UpperCamelCase : int = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
77
'''simple docstring''' class _a : def __init__( self : Any ): '''simple docstring''' UpperCAmelCase = {} # Mapping from char to TrieNode UpperCAmelCase = False def A ( self : int , lowercase : list[str] ): '''simple docstring''' for word in words: self.insert(lowercase ) def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: UpperCAmelCase = TrieNode() UpperCAmelCase = curr.nodes[char] UpperCAmelCase = True def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: return False UpperCAmelCase = curr.nodes[char] return curr.is_leaf def A ( self : str , lowercase : str ): '''simple docstring''' def _delete(lowercase : TrieNode , lowercase : str , lowercase : int ) -> bool: if index == len(lowercase ): # If word does not exist if not curr.is_leaf: return False UpperCAmelCase = False return len(curr.nodes ) == 0 UpperCAmelCase = word[index] UpperCAmelCase = curr.nodes.get(lowercase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCAmelCase = _delete(lowercase , lowercase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowercase , 0 ) def snake_case_ (_a : TrieNode , _a : str ): if node.is_leaf: print(_a , end=''' ''' ) for key, value in node.nodes.items(): print_words(_a , word + key ) def snake_case_ (): UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split() UpperCAmelCase = TrieNode() root.insert_many(_a ) # print_words(root, "") assert all(root.find(_a ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def snake_case_ (_a : str , _a : bool ): print(str(_a ) , '''works!''' if passes else '''doesn\'t work :(''' ) def snake_case_ (): assert test_trie() def snake_case_ (): print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
34
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case_ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) A ={ 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _a ( __a ): __a : Union[str, Any] = """encodec""" def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = target_bandwidths UpperCAmelCase = sampling_rate UpperCAmelCase = audio_channels UpperCAmelCase = normalize UpperCAmelCase = chunk_length_s UpperCAmelCase = overlap UpperCAmelCase = hidden_size UpperCAmelCase = num_filters UpperCAmelCase = num_residual_layers UpperCAmelCase = upsampling_ratios UpperCAmelCase = norm_type UpperCAmelCase = kernel_size UpperCAmelCase = last_kernel_size UpperCAmelCase = residual_kernel_size UpperCAmelCase = dilation_growth_rate UpperCAmelCase = use_causal_conv UpperCAmelCase = pad_mode UpperCAmelCase = compress UpperCAmelCase = num_lstm_layers UpperCAmelCase = trim_right_ratio UpperCAmelCase = codebook_size UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size UpperCAmelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**lowercase ) @property def A ( self : Dict ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def A ( self : Union[str, Any] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def A ( self : Optional[int] ): '''simple docstring''' return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
34
0
'''simple docstring''' def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> int: '''simple docstring''' _A , _A = len(__lowercase ), len(grid[0] ) if ( min(__lowercase , __lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _A = 0 count += depth_first_search(__lowercase , row + 1 , __lowercase , __lowercase ) count += depth_first_search(__lowercase , row - 1 , __lowercase , __lowercase ) count += depth_first_search(__lowercase , __lowercase , col + 1 , __lowercase ) count += depth_first_search(__lowercase , __lowercase , col - 1 , __lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
79
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch A =logging.get_logger(__name__) class _a ( __a ): __a : str = ["""pixel_values"""] def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' super().__init__(**lowercase ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" ) return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase ) def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ): '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ): '''simple docstring''' UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(lowercase ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images] UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=lowercase , tensor_type=lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ): '''simple docstring''' UpperCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase ) != len(lowercase ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowercase ): UpperCAmelCase = target_sizes.numpy() UpperCAmelCase = [] for idx in range(len(lowercase ) ): UpperCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase ) UpperCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase ) else: UpperCAmelCase = logits.argmax(dim=1 ) UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
34
0
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class lowercase_ ( a__ ): def __init__( self , a , a ): super().__init__() self.register_modules(unet=a , scheduler=a ) @torch.no_grad() def __call__( self , a = 1 , a = None , a = 50 , a = "pil" , a = True , **a , ): UpperCamelCase__ = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a , ) UpperCamelCase__ = image.to(self.device ) # set step values self.scheduler.set_timesteps(a ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase__ = self.unet(a , a ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(a , a , a ).prev_sample UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(a ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=a ), "This is a local test"
80
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A =logging.getLogger(__name__) def snake_case_ (_a : Dict , _a : Union[str, Any] ): return (preds == labels).mean() @dataclass class _a : __a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class _a : __a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) __a : str = field(metadata={"""help""": """Should contain the data files for the task."""} ) __a : int = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def snake_case_ (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _a ) # Set seed set_seed(training_args.seed ) try: UpperCAmelCase = processors[data_args.task_name]() UpperCAmelCase = processor.get_labels() UpperCAmelCase = len(_a ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , ) # Get datasets UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_a : EvalPrediction ) -> Dict: UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_a , p.label_ids )} # Data collator UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCAmelCase = Trainer( model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(_a , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , _a , _a ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_a ) return results def snake_case_ (_a : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
34
0
"""simple docstring""" import requests from bsa import BeautifulSoup def _A ( lowercase = "AAPL" ): """simple docstring""" a =f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' a =BeautifulSoup(requests.get(lowercase ).text , '''html.parser''' ) a ='''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
81
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a ( __a ): __a : int = ["""image_processor""", """tokenizer"""] __a : Union[str, Any] = """ChineseCLIPImageProcessor""" __a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowercase , lowercase ) UpperCAmelCase = self.image_processor def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase ) if images is not None: UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase ) def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*lowercase , **lowercase ) @property def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self : List[Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , ) return self.image_processor_class
34
0
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def _UpperCAmelCase ( snake_case ): """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def _UpperCAmelCase ( ): """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = """mock-s3-bucket""" _lowerCAmelCase = F's3://{mock_bucket}' _lowerCAmelCase = extract_path_from_uri(snake_case ) assert dataset_path.startswith("""s3://""" ) is False _lowerCAmelCase = """./local/path""" _lowerCAmelCase = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = is_remote_filesystem(snake_case ) assert is_remote is True _lowerCAmelCase = fsspec.filesystem("""file""" ) _lowerCAmelCase = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("""compression_fs_class""" , snake_case ) def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file} _lowerCAmelCase = input_paths[compression_fs_class.protocol] if input_path is None: _lowerCAmelCase = F'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) _lowerCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) _lowerCAmelCase = os.path.basename(snake_case ) _lowerCAmelCase = expected_filename[: expected_filename.rindex(""".""" )] assert fs.glob("""*""" ) == [expected_filename] with fs.open(snake_case , """r""" , encoding="""utf-8""" ) as f, open(snake_case , encoding="""utf-8""" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] ) def _UpperCAmelCase ( snake_case , snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path} _lowerCAmelCase = compressed_file_paths[protocol] _lowerCAmelCase = """dataset.jsonl""" _lowerCAmelCase = F'{protocol}://{member_file_path}::{compressed_file_path}' _lowerCAmelCase , *_lowerCAmelCase = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("""non_existing_""" + member_file_path ) @pytest.mark.integration def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = hf_api.dataset_info(snake_case , token=snake_case ) _lowerCAmelCase = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"] assert hffs.isdir("""data""" ) assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" ) with open(snake_case ) as f: assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read() def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = """bz2""" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == F'A filesystem protocol was already set for {protocol} and will be overwritten.' )
82
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging A =logging.get_logger(__name__) A ={ 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _a ( __a ): __a : List[Any] = """marian""" __a : Union[str, Any] = ["""past_key_values"""] __a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : List[Any] , lowercase : Union[str, Any]=58_101 , lowercase : Tuple=None , lowercase : str=1_024 , lowercase : Optional[int]=12 , lowercase : Optional[int]=4_096 , lowercase : int=16 , lowercase : List[Any]=12 , lowercase : int=4_096 , lowercase : Optional[int]=16 , lowercase : int=0.0 , lowercase : Tuple=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]="gelu" , lowercase : Tuple=1_024 , lowercase : str=0.1 , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.02 , lowercase : Union[str, Any]=58_100 , lowercase : List[str]=False , lowercase : str=58_100 , lowercase : Any=0 , lowercase : Optional[Any]=0 , lowercase : Tuple=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = vocab_size UpperCAmelCase = decoder_vocab_size or vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , ) class _a ( __a ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def A ( self : int ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def A ( self : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super().outputs else: UpperCAmelCase = super(lowercase , self ).outputs if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) # Generate decoder inputs UpperCAmelCase = seq_length if not self.use_past else 1 UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase = dict(**lowercase , **lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1] UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = decoder_seq_length + 3 UpperCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 ) UpperCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase = min(lowercase , lowercase ) UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), ) ) # TODO: test this. UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase , lowercase ): common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) ) return common_inputs def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase = seqlen + 2 UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = common_inputs['''attention_mask'''].dtype UpperCAmelCase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) UpperCAmelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase ) ] return common_inputs def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase ) UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) ) return common_inputs def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) else: UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) return common_inputs def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase ) else: UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_( lowercase , lowercase , lowercase , lowercase ) @property def A ( self : Any ): '''simple docstring''' return 1E-4
34
0
'''simple docstring''' def A__ ( UpperCAmelCase_ ): _UpperCamelCase : str = [0] * len(UpperCAmelCase_ ) _UpperCamelCase : Any = [] _UpperCamelCase : Optional[int] = [] _UpperCamelCase : List[str] = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(UpperCAmelCase_ ) ): if indegree[i] == 0: queue.append(UpperCAmelCase_ ) while queue: _UpperCamelCase : Any = queue.pop(0 ) cnt += 1 topo.append(UpperCAmelCase_ ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(UpperCAmelCase_ ) if cnt != len(UpperCAmelCase_ ): print('Cycle exists' ) else: print(UpperCAmelCase_ ) # Adjacency List of Graph snake_case_ : List[str] = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
83
'''simple docstring''' import os def snake_case_ (): UpperCAmelCase = os.path.join(os.path.dirname(_a ) , '''num.txt''' ) with open(_a ) as file_hand: return str(sum(int(_a ) for line in file_hand ) )[:1_0] if __name__ == "__main__": print(solution())
34
0
"""simple docstring""" import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel __UpperCAmelCase = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = TOKEN HfFolder.save_token(__A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[Any]: try: delete_repo(token=cls._token , repo_id="""test-model-flax""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" ) except HTTPError: pass def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase_ :Optional[Any] = FlaxBertModel(__A ) model.push_to_hub("""test-model-flax""" , use_auth_token=self._token ) lowerCAmelCase_ :Union[str, Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase_ :str = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :Union[str, Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="""test-model-flax""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__A , repo_id="""test-model-flax""" , push_to_hub=__A , use_auth_token=self._token ) lowerCAmelCase_ :Any = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase_ :Dict = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :Tuple = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase_ :Optional[int] = FlaxBertModel(__A ) model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token ) lowerCAmelCase_ :Any = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowerCAmelCase_ :Tuple = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :str = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __A , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=__A , use_auth_token=self._token ) lowerCAmelCase_ :Dict = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" ) lowerCAmelCase_ :str = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase_ :int = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase_ :str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__A , 1E-3 , msg=f"""{key} not identical""" ) def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = True lowerCAmelCase_ :Any = flatten_dict(modela.params ) lowerCAmelCase_ :Dict = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase_ :str = False return models_are_equal @require_flax class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowerCAmelCase_ :Union[str, Any] = FlaxBertModel(__A ) lowerCAmelCase_ :Union[str, Any] = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__A , __A ) ) with self.assertRaises(__A ): lowerCAmelCase_ :List[Any] = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :List[Any] = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertTrue(check_models_equal(__A , __A ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowerCAmelCase_ :List[Any] = FlaxBertModel(__A ) lowerCAmelCase_ :str = """bert""" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__A , __A ) , max_shard_size="""10KB""" ) with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :int = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertTrue(check_models_equal(__A , __A ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :int = """bert""" lowerCAmelCase_ :Any = """hf-internal-testing/tiny-random-bert-subfolder""" with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :Any = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertIsNotNone(__A ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[Any] = """bert""" lowerCAmelCase_ :str = """hf-internal-testing/tiny-random-bert-sharded-subfolder""" with self.assertRaises(__A ): lowerCAmelCase_ :Dict = FlaxBertModel.from_pretrained(__A ) lowerCAmelCase_ :Optional[int] = FlaxBertModel.from_pretrained(__A , subfolder=__A ) self.assertIsNotNone(__A )
84
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version A =logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') A ={ 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization A ={ 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } A =sorted(arg_to_scheduler.keys()) A ='{' + ', '.join(arg_to_scheduler_choices) + '}' class _a ( pl.LightningModule ): def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase ) UpperCAmelCase = 0 UpperCAmelCase = Path(self.hparams.output_dir ) UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: UpperCAmelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , ) else: UpperCAmelCase = config UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase , lowercase ): assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute" setattr(self.config , lowercase , getattr(self.hparams , lowercase ) ) if tokenizer is None: UpperCAmelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , ) else: UpperCAmelCase = tokenizer UpperCAmelCase = MODEL_MODES[mode] if model is None: UpperCAmelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , ) else: UpperCAmelCase = model def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler] UpperCAmelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.model UpperCAmelCase = ['''bias''', '''LayerNorm.weight'''] UpperCAmelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: UpperCAmelCase = Adafactor( lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase ) else: UpperCAmelCase = AdamW( lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) UpperCAmelCase = optimizer UpperCAmelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def A ( self : List[Any] , lowercase : int , lowercase : List[str] ): '''simple docstring''' return self.validation_step(lowercase , lowercase ) def A ( self : List[Any] , lowercase : Tuple ): '''simple docstring''' return self.validation_end(lowercase ) def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A ( self : List[str] , lowercase : Any ): '''simple docstring''' if stage == "test": UpperCAmelCase = len(self.test_dataloader().dataset ) else: UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase ) UpperCAmelCase = len(self.train_dataloader().dataset ) def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def A ( self : Union[str, Any] ): '''simple docstring''' return self.train_loader def A ( self : Optional[Any] ): '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : List[Any] ): '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : Any , lowercase : Union[str, Any] ): '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A ( self : List[str] , lowercase : Dict[str, Any] ): '''simple docstring''' UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' ) UpperCAmelCase = self.step_count self.model.save_pretrained(lowercase ) self.tokenizer.save_pretrained(lowercase ) @staticmethod def A ( lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class _a ( pl.Callback ): def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase ) class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler'''] UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase ) def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log results for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log and save results to file UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase , '''w''' ) as writer: for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def snake_case_ (_a : int , _a : Optional[Any] ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '''--output_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=_a , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , ) def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ): pl.seed_everything(args.seed ) # init model UpperCAmelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_a ) # add custom checkpoints if checkpoint_callback is None: UpperCAmelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_a ) if logging_callback is None: UpperCAmelCase = LoggingCallback() UpperCAmelCase = {} if args.fpaa: UpperCAmelCase = 1_6 if args.gpus > 1: UpperCAmelCase = '''auto''' UpperCAmelCase = '''ddp''' UpperCAmelCase = args.accumulate_grad_batches UpperCAmelCase = None UpperCAmelCase = '''auto''' UpperCAmelCase = pl.Trainer.from_argparse_args( _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , ) if args.do_train: trainer.fit(_a ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
34
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[str] = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Optional[Any] = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Union[str, Any] = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
85
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue UpperCAmelCase = cst_fwd.get(_a , np.inf ) UpperCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) UpperCAmelCase = new_cost_f UpperCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ): UpperCAmelCase = -1 UpperCAmelCase = set() UpperCAmelCase = set() UpperCAmelCase = {source: 0} UpperCAmelCase = {destination: 0} UpperCAmelCase = {source: None} UpperCAmelCase = {destination: None} UpperCAmelCase = PriorityQueue() UpperCAmelCase = PriorityQueue() UpperCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): UpperCAmelCase , UpperCAmelCase = queue_forward.get() visited_forward.add(_a ) UpperCAmelCase , UpperCAmelCase = queue_backward.get() visited_backward.add(_a ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: UpperCAmelCase = shortest_distance return shortest_path_distance A ={ 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } A ={ 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
34
0
"""simple docstring""" import sys from collections import defaultdict class A__ : def __init__( self ): __lowerCAmelCase : Optional[Any] = [] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self.node_position[vertex] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = pos def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCAmelCase : Union[str, Any] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCAmelCase : str = 2 * start + 1 else: __lowerCAmelCase : Dict = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCAmelCase , __lowerCAmelCase : str = heap[smallest_child], positions[smallest_child] __lowerCAmelCase , __lowerCAmelCase : Optional[int] = ( heap[start], positions[start], ) __lowerCAmelCase , __lowerCAmelCase : Any = temp, tempa __lowerCAmelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , _SCREAMING_SNAKE_CASE ) self.top_to_bottom(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = position[index] while index != 0: __lowerCAmelCase : str = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCAmelCase : Any = heap[parent] __lowerCAmelCase : Tuple = position[parent] self.set_position(position[parent] , _SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : str = val __lowerCAmelCase : Dict = temp self.set_position(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) break __lowerCAmelCase : str = parent else: __lowerCAmelCase : List[Any] = val __lowerCAmelCase : int = temp self.set_position(_SCREAMING_SNAKE_CASE , 0 ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) // 2 - 1 for i in range(_SCREAMING_SNAKE_CASE , -1 , -1 ): self.top_to_bottom(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = positions[0] __lowerCAmelCase : Optional[int] = sys.maxsize self.top_to_bottom(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) return temp def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : int = Heap() __lowerCAmelCase : str = [0] * len(_UpperCamelCase ) __lowerCAmelCase : int = [-1] * len(_UpperCamelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCAmelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCAmelCase : Tuple = [] for vertex in range(len(_UpperCamelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCamelCase ) heap.node_position.append(_UpperCamelCase ) __lowerCAmelCase : Any = [] __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Optional[int] = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCAmelCase : int = 0 __lowerCAmelCase : List[Any] = distance heap.heapify(_UpperCamelCase , _UpperCamelCase ) for _ in range(1 , len(_UpperCamelCase ) ): __lowerCAmelCase : str = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCAmelCase : int = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCamelCase )] ): __lowerCAmelCase : Tuple = distance heap.bottom_to_top( _UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > lowerCamelCase__ = int(input("""Enter number of edges: """).strip()) lowerCamelCase__ = defaultdict(list) for _ in range(edges_number): lowerCamelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
86
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) def snake_case_ (_a : List[str] ): UpperCAmelCase = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase = 1_2_8 elif "12-12" in model_name: UpperCAmelCase = 1_2 UpperCAmelCase = 1_2 elif "14-14" in model_name: UpperCAmelCase = 1_4 UpperCAmelCase = 1_4 elif "16-16" in model_name: UpperCAmelCase = 1_6 UpperCAmelCase = 1_6 else: raise ValueError('''Model not supported''' ) UpperCAmelCase = '''huggingface/label-files''' if "speech-commands" in model_name: UpperCAmelCase = 3_5 UpperCAmelCase = '''speech-commands-v2-id2label.json''' else: UpperCAmelCase = 5_2_7 UpperCAmelCase = '''audioset-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (_a : Tuple ): if "module.v" in name: UpperCAmelCase = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: UpperCAmelCase = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: UpperCAmelCase = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def snake_case_ (_a : Dict , _a : List[Any] ): for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(_a ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[3] ) UpperCAmelCase = config.hidden_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[dim : dim * 2, :] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def snake_case_ (_a : Tuple ): UpperCAmelCase = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(_a , _a ) @torch.no_grad() def snake_case_ (_a : int , _a : Union[str, Any] , _a : Dict=False ): UpperCAmelCase = get_audio_spectrogram_transformer_config(_a ) UpperCAmelCase = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict UpperCAmelCase = model_name_to_url[model_name] UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' ) # remove some keys remove_keys(_a ) # rename some keys UpperCAmelCase = convert_state_dict(_a , _a ) # load 🤗 model UpperCAmelCase = ASTForAudioClassification(_a ) model.eval() model.load_state_dict(_a ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 UpperCAmelCase = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 UpperCAmelCase = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8 UpperCAmelCase = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a ) if "speech-commands" in model_name: UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) UpperCAmelCase = dataset[0]['''audio''']['''array'''] else: UpperCAmelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) UpperCAmelCase , UpperCAmelCase = torchaudio.load(_a ) UpperCAmelCase = waveform.squeeze().numpy() UpperCAmelCase = feature_extractor(_a , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' ) # forward pass UpperCAmelCase = model(**_a ) UpperCAmelCase = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_a ).mkdir(exist_ok=_a ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_a ) print(F"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_a ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F"MIT/{model_name}" ) feature_extractor.push_to_hub(F"MIT/{model_name}" ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
34
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class snake_case_ ( __A ): __A : Optional[int] = "funnel" __A : Optional[int] = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Optional[Any]=[4, 4, 4] , lowercase_ : int=None , lowercase_ : List[Any]=2 , lowercase_ : List[str]=7_68 , lowercase_ : Any=12 , lowercase_ : List[str]=64 , lowercase_ : Optional[int]=30_72 , lowercase_ : Optional[int]="gelu_new" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.0 , lowercase_ : List[str]=0.1 , lowercase_ : List[Any]=None , lowercase_ : List[Any]=1E-9 , lowercase_ : Dict="mean" , lowercase_ : Dict="relative_shift" , lowercase_ : Optional[Any]=True , lowercase_ : List[str]=True , lowercase_ : Dict=True , **lowercase_ : List[Any] , ) -> int: lowercase__ : List[str] = vocab_size lowercase__ : str = block_sizes lowercase__ : int = [1] * len(lowercase_ ) if block_repeats is None else block_repeats assert len(lowercase_ ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." lowercase__ : Any = num_decoder_layers lowercase__ : List[str] = d_model lowercase__ : int = n_head lowercase__ : Union[str, Any] = d_head lowercase__ : Tuple = d_inner lowercase__ : Union[str, Any] = hidden_act lowercase__ : Union[str, Any] = hidden_dropout lowercase__ : str = attention_dropout lowercase__ : Tuple = activation_dropout lowercase__ : Optional[Any] = initializer_range lowercase__ : List[Any] = initializer_std lowercase__ : Optional[Any] = layer_norm_eps assert pooling_type in [ "mean", "max", ], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' lowercase__ : List[str] = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' lowercase__ : str = attention_type lowercase__ : int = separate_cls lowercase__ : Dict = truncate_seq lowercase__ : str = pool_q_only super().__init__(**lowercase_ ) @property def __UpperCamelCase ( self : int ) -> Optional[Any]: return sum(self.block_sizes ) @num_hidden_layers.setter def __UpperCamelCase ( self : Optional[Any] , lowercase_ : List[Any] ) -> int: raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." ) @property def __UpperCamelCase ( self : Any ) -> Dict: return len(self.block_sizes ) @num_blocks.setter def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Tuple ) -> Optional[int]: raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
87
'''simple docstring''' from __future__ import annotations def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : List[str] ): # noqa: E741 while r - l > 1: UpperCAmelCase = (l + r) // 2 if v[m] >= key: UpperCAmelCase = m else: UpperCAmelCase = m # noqa: E741 return r def snake_case_ (_a : list[int] ): if len(_a ) == 0: return 0 UpperCAmelCase = [0] * len(_a ) UpperCAmelCase = 1 UpperCAmelCase = v[0] for i in range(1 , len(_a ) ): if v[i] < tail[0]: UpperCAmelCase = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase = v[i] length += 1 else: UpperCAmelCase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
34
0
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger('transformers.models.speecht5') def a__ ( A_, A_, A_ ): '''simple docstring''' hf_model.apply_weight_norm() __magic_name__ = checkpoint["""input_conv.weight_g"""] __magic_name__ = checkpoint["""input_conv.weight_v"""] __magic_name__ = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): __magic_name__ = checkpoint[f'''upsamples.{i}.1.weight_g'''] __magic_name__ = checkpoint[f'''upsamples.{i}.1.weight_v'''] __magic_name__ = checkpoint[f'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): __magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias'''] __magic_name__ = checkpoint["""output_conv.1.weight_g"""] __magic_name__ = checkpoint["""output_conv.1.weight_v"""] __magic_name__ = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def a__ ( A_, A_, A_, A_=None, A_=None, ): '''simple docstring''' if config_path is not None: __magic_name__ = SpeechTaHifiGanConfig.from_pretrained(A_ ) else: __magic_name__ = SpeechTaHifiGanConfig() __magic_name__ = SpeechTaHifiGan(A_ ) __magic_name__ = torch.load(A_ ) load_weights(orig_checkpoint["""model"""]["""generator"""], A_, A_ ) __magic_name__ = np.load(A_ ) __magic_name__ = stats[0].reshape(-1 ) __magic_name__ = stats[1].reshape(-1 ) __magic_name__ = torch.from_numpy(A_ ).float() __magic_name__ = torch.from_numpy(A_ ).float() model.save_pretrained(A_ ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) __lowerCAmelCase : List[Any] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
88
'''simple docstring''' def snake_case_ (_a : str , _a : str ): UpperCAmelCase = len(_a ) + 1 UpperCAmelCase = len(_a ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )] # since string of zero length match pattern of zero length UpperCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _a ): UpperCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _a ): UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _a ): for j in range(1 , _a ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": UpperCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: UpperCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): UpperCAmelCase = dp[i - 1][j] else: UpperCAmelCase = 0 else: UpperCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") A ='aab' A ='c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
34
0
'''simple docstring''' import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( lowerCAmelCase_ ) -> Tuple: print('Loading config file...' ) def flatten_yaml_as_dict(lowerCAmelCase_ , lowerCAmelCase_="" , lowerCAmelCase_="." ): _a : Optional[Any] = [] for k, v in d.items(): _a : str = parent_key + sep + k if parent_key else k if isinstance(lowerCAmelCase_ , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(lowerCAmelCase_ , lowerCAmelCase_ , sep=lowerCAmelCase_ ).items() ) else: items.append((new_key, v) ) return dict(lowerCAmelCase_ ) _a : Optional[Any] = argparse.Namespace() with open(lowerCAmelCase_ , 'r' ) as yaml_file: try: _a : List[str] = yaml.load(lowerCAmelCase_ , Loader=yaml.FullLoader ) _a : str = flatten_yaml_as_dict(lowerCAmelCase_ ) for k, v in flat_cfg.items(): setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(lowerCAmelCase_ , str(lowerCAmelCase_ ) ) ) return config def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _a : int = MobileViTVaConfig() _a : Tuple = False # dataset if task_name.startswith('imagenet1k_' ): _a : Any = 1000 if int(task_name.strip().split('_' )[-1] ) == 384: _a : Optional[Any] = 384 else: _a : List[Any] = 256 _a : Optional[Any] = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): _a : str = 21000 if int(task_name.strip().split('_' )[-1] ) == 384: _a : Any = 384 else: _a : int = 256 _a : List[Any] = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): _a : Union[str, Any] = 151 _a : Any = 512 _a : str = 'ade20k-id2label.json' _a : int = True elif task_name.startswith('voc_' ): _a : Tuple = 21 _a : Optional[Any] = 512 _a : Optional[int] = 'pascal-voc-id2label.json' _a : Tuple = True # orig_config _a : List[str] = load_orig_config_file(lowerCAmelCase_ ) assert getattr(lowerCAmelCase_ , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" _a : List[str] = getattr(lowerCAmelCase_ , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(lowerCAmelCase_ , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" _a : Tuple = getattr(lowerCAmelCase_ , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: _a : Tuple = getattr(lowerCAmelCase_ , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: _a : str = getattr(lowerCAmelCase_ , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) _a : str = getattr(lowerCAmelCase_ , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 ) _a : int = getattr(lowerCAmelCase_ , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label _a : Optional[int] = 'huggingface/label-files' _a : int = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) ) _a : List[str] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _a : int = idalabel _a : List[Any] = {v: k for k, v in idalabel.items()} return config def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _a : List[Any] = dct.pop(lowerCAmelCase_ ) _a : List[Any] = val def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Union[str, Any]: if base_model: _a : str = '' else: _a : Optional[int] = 'mobilevitv2.' _a : Tuple = [] for k in state_dict.keys(): if k[:8] == "encoder.": _a : Optional[Any] = k[8:] else: _a : Union[str, Any] = k if ".block." in k: _a : List[Any] = k_new.replace('.block.' , '.' ) if ".conv." in k: _a : Tuple = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: _a : Optional[Any] = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: _a : List[str] = k_new.replace('conv_1.' , f"""{model_prefix}conv_stem.""" ) for i in [1, 2]: if f"""layer_{i}.""" in k: _a : Optional[int] = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" ) if ".exp_1x1." in k: _a : int = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: _a : Tuple = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if f"""layer_{i}.0.""" in k: _a : Union[str, Any] = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" ) if f"""layer_{i}.1.local_rep.0.""" in k: _a : Tuple = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" ) if f"""layer_{i}.1.local_rep.1.""" in k: _a : int = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" ) for i in [3, 4, 5]: if i == 3: _a : Dict = [0, 1] elif i == 4: _a : Union[str, Any] = [0, 1, 2, 3] elif i == 5: _a : Union[str, Any] = [0, 1, 2] for j in j_in: if f"""layer_{i}.1.global_rep.{j}.""" in k: _a : Optional[Any] = k_new.replace( f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" ) if f"""layer_{i}.1.global_rep.{j+1}.""" in k: _a : Dict = k_new.replace( f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" ) if f"""layer_{i}.1.conv_proj.""" in k: _a : Any = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" ) if "pre_norm_attn.0." in k: _a : List[str] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: _a : Tuple = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: _a : Optional[Any] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: _a : Union[str, Any] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' ) if "pre_norm_ffn.3." in k: _a : Optional[int] = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: _a : List[Any] = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: _a : str = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: _a : Tuple = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: _a : List[str] = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def __lowerCamelCase ( lowerCAmelCase_ ) -> Dict: _a : Any = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(lowerCAmelCase_ ) for k in keys_to_ignore: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( ) -> Any: _a : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" _a : Union[str, Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _a : Union[str, Any] = get_mobilevitva_config(lowerCAmelCase_ , lowerCAmelCase_ ) # load original state_dict _a : Optional[int] = torch.load(lowerCAmelCase_ , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): _a : Optional[Any] = MobileViTVaForSemanticSegmentation(lowerCAmelCase_ ).eval() _a : Tuple = False else: _a : int = MobileViTVaForImageClassification(lowerCAmelCase_ ).eval() _a : Optional[Any] = False # remove and rename some keys of load the original model _a : str = checkpoint remove_unused_keys(lowerCAmelCase_ ) _a : Dict = create_rename_keys(lowerCAmelCase_ , base_model=lowerCAmelCase_ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # load modified state_dict model.load_state_dict(lowerCAmelCase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _a : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _a : Dict = image_processor(images=prepare_img() , return_tensors='pt' ) _a : Dict = model(**lowerCAmelCase_ ) # verify classification model if task_name.startswith('imagenet' ): _a : Dict = outputs.logits _a : int = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant _a : List[str] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''imagenet1k_256''', type=str, help=( '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . ''' ''' Classification (ImageNet-1k) - MobileViTV2 (256x256) : imagenet1k_256 - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : imagenet21k_to_1k_256 - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on ImageNet-1k 384x384) : imagenet21k_to_1k_384 Segmentation - ADE20K Dataset : ade20k_deeplabv3 - Pascal VOC 2012 Dataset: voc_deeplabv3 ''' ), choices=[ '''imagenet1k_256''', '''imagenet1k_384''', '''imagenet21k_to_1k_256''', '''imagenet21k_to_1k_384''', '''ade20k_deeplabv3''', '''voc_deeplabv3''', ], ) parser.add_argument( '''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) __lowerCAmelCase = parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
89
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A ='pt' elif is_tf_available(): A ='tf' else: A ='jax' class _a ( __a , unittest.TestCase ): __a : Optional[Any] = PerceiverTokenizer __a : str = False def A ( self : Union[str, Any] ): '''simple docstring''' super().setUp() UpperCAmelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : Optional[int] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self : Union[str, Any] , **lowercase : int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ): '''simple docstring''' UpperCAmelCase = [] for i in range(len(lowercase ) ): try: UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) ) UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: UpperCAmelCase = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase = [t[0] for t in toks] # Ensure consistency UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: UpperCAmelCase = ''' ''' + output_txt UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = '''Unicode €.''' UpperCAmelCase = tokenizer(lowercase ) UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' ) UpperCAmelCase = tokenizer('''e è é ê ë''' ) UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) if FRAMEWORK != "jax": UpperCAmelCase = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , lowercase ) self.assertIn('''attention_mask''' , lowercase ) self.assertNotIn('''decoder_input_ids''' , lowercase ) self.assertNotIn('''decoder_attention_mask''' , lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] UpperCAmelCase = tokenizer( text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) UpperCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) UpperCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )] UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def A ( self : Union[str, Any] ): '''simple docstring''' pass def A ( self : Any ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' pass def A ( self : str ): '''simple docstring''' pass def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(lowercase , lowercase )
34
0
from itertools import permutations def lowerCamelCase_ ( UpperCamelCase__ : tuple ) -> bool: """simple docstring""" if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __lowerCamelCase = [7, 11, 13, 17] for i, test in enumerate(UpperCamelCase__ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowerCamelCase_ ( UpperCamelCase__ : int = 10 ) -> int: """simple docstring""" return sum( int(''.join(map(UpperCamelCase__ , UpperCamelCase__ ) ) ) for num in permutations(range(UpperCamelCase__ ) ) if is_substring_divisible(UpperCamelCase__ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
90
'''simple docstring''' import os from distutils.util import strtobool def snake_case_ (_a : Union[str, Any] , _a : List[Any] ): for e in env_keys: UpperCAmelCase = int(os.environ.get(_a , -1 ) ) if val >= 0: return val return default def snake_case_ (_a : Dict , _a : Any=False ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return strtobool(_a ) == 1 # As its name indicates `strtobool` actually returns an int... def snake_case_ (_a : str , _a : Optional[Any]="no" ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return value
34
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) UpperCAmelCase_ : Tuple = """▁""" UpperCAmelCase_ : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ : str = { """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ : str = { """facebook/xglm-564M""": 2048, } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : List[Any] , lowercase_ : str , lowercase_ : Tuple="<s>" , lowercase_ : Any="</s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Union[str, Any]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Tuple = [F'<madeupword{i}>' for i in range(self.num_madeup_words)] SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''additional_special_tokens''' , []) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) SCREAMING_SNAKE_CASE_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowercase_)) SCREAMING_SNAKE_CASE_ : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1 # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} SCREAMING_SNAKE_CASE_ : List[Any] = len(self.sp_model) SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)} self.fairseq_tokens_to_ids.update(lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Dict): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy() SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_) if token_ids_a is None: return [1] + ([0] * len(lowercase_)) return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): '''simple docstring''' return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str): '''simple docstring''' return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip() return out_string def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None): '''simple docstring''' if not os.path.isdir(lowercase_): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , '''wb''') as fi: SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
91
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A =logging.getLogger(__name__) A ='Hello world! cécé herlolip' A =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def snake_case_ (_a : List[Any] , _a : Any ): UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) UpperCAmelCase = torch.load(_a , lambda _a , _a : storage ) UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a ) original.eval() UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase = encoder_input_ids UpperCAmelCase = decoder_input_ids UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0] UpperCAmelCase = original.generator(_a ) UpperCAmelCase = new_model( _a , _a , _a , _a , _a )[0] UpperCAmelCase = new_model.generator(_a ) UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) A =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
34
0
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class a__ ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) __lowerCAmelCase = { "do_resize": True, "size": {"height": 1_8, "width": 1_8}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } __lowerCAmelCase = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(_A , _A ) def __SCREAMING_SNAKE_CASE( self , **_A ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def __SCREAMING_SNAKE_CASE( self , **_A ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) __lowerCAmelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) __lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(_A , return_tensors="np" ) __lowerCAmelCase = processor(images=_A , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) __lowerCAmelCase = "lower newer" __lowerCAmelCase = processor(text=_A ) __lowerCAmelCase = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) __lowerCAmelCase = "lower newer" __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with self.assertRaises(_A ): processor() def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(_A ) __lowerCAmelCase = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def __SCREAMING_SNAKE_CASE( self ): """simple docstring""" __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A ) __lowerCAmelCase = "lower newer" __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
92
'''simple docstring''' from ..utils import DummyObject, requires_backends class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : Any = ["""flax""", """transformers"""] def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] )
34
0
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ): lowerCAmelCase_ = RobertaTokenizer lowerCAmelCase_ = RobertaTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = {'''cls_token''': '''<s>'''} def _snake_case ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : Any = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowercase_ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) lowercase_ : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowercase_ : Union[str, Any] = {'''unk_token''': '''<unk>'''} lowercase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) ) def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , **__SCREAMING_SNAKE_CASE ): """simple docstring""" kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : List[Any] = '''lower newer''' lowercase_ : Dict = '''lower newer''' return input_text, output_text def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase_ : List[Any] = '''lower newer''' lowercase_ : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowercase_ : Union[str, Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True) self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokens + [tokenizer.unk_token] lowercase_ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" lowercase_ : List[str] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _snake_case ( self ): """simple docstring""" lowercase_ : List[Any] = self.tokenizer_class.from_pretrained('''roberta-base''' ) lowercase_ : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.encode( '''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _snake_case ( self ): """simple docstring""" lowercase_ : Tuple = self.get_tokenizer() lowercase_ : Optional[Any] = '''Encode this sequence.''' lowercase_ : Optional[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments lowercase_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) lowercase_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Testing spaces after special tokens lowercase_ : Any = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space lowercase_ : List[Any] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) lowercase_ : int = '''Encode <mask> sequence''' lowercase_ : List[Any] = '''Encode <mask>sequence''' lowercase_ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = encoded.index(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = encoded.index(__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" pass def _snake_case ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : str = '''A, <mask> AllenNLP sentence.''' lowercase_ : Optional[int] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) lowercase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) lowercase_ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def _snake_case ( self ): """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowercase_ : List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE ) self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE ) self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE ) def _snake_case ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Optional[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` lowercase_ : int = F'''{text_of_1_token} {text_of_1_token}''' lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : List[str] = F''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowercase_ : str = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : Any = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , ) lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained( __SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
93
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) A ={ 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } A ={ 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def snake_case_ (_a : List[str] ): UpperCAmelCase = EfficientNetConfig() UpperCAmelCase = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCAmelCase = CONFIG_MAP[model_name]['''width_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''depth_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dw_padding'''] UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = 1_0_0_0 UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(_a , stream=_a ).raw ) return im def snake_case_ (_a : str ): UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_a , ) return preprocessor def snake_case_ (_a : Optional[Any] ): UpperCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase = sorted(set(_a ) ) UpperCAmelCase = len(_a ) UpperCAmelCase = {b: str(_a ) for b, i in zip(_a , range(_a ) )} UpperCAmelCase = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase = '''efficientnet.''' + item[1] UpperCAmelCase = '''classifier.weight''' UpperCAmelCase = '''classifier.bias''' return key_mapping def snake_case_ (_a : Dict , _a : List[str] , _a : Dict ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase = torch.from_numpy(np.transpose(_a ) ) else: UpperCAmelCase = torch.from_numpy(_a ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_a ) @torch.no_grad() def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ): UpperCAmelCase = model_classes[model_name]( include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , ) UpperCAmelCase = original_model.trainable_variables UpperCAmelCase = original_model.non_trainable_variables UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase = param.numpy() UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase = get_efficientnet_config(_a ) UpperCAmelCase = EfficientNetForImageClassification(_a ).eval() UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase = rename_keys(_a ) replace_params(_a , _a , _a ) # Initialize preprocessor and preprocess input image UpperCAmelCase = convert_image_processor(_a ) UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase = hf_model(**_a ) UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase = False UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase = image.img_to_array(_a ) UpperCAmelCase = np.expand_dims(_a , axis=0 ) UpperCAmelCase = original_model.predict(_a ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_a ): os.mkdir(_a ) # Save converted model and image processor hf_model.save_pretrained(_a ) preprocessor.save_pretrained(_a ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." ) UpperCAmelCase = F"efficientnet-{model_name}" preprocessor.push_to_hub(_a ) hf_model.push_to_hub(_a ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') A =parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
34
0
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = DiTPipeline SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = False def SCREAMING_SNAKE_CASE__ ( self ): torch.manual_seed(0 ) a :Union[str, Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCamelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCamelCase , ) a :Optional[int] = AutoencoderKL() a :str = DDIMScheduler() a :List[str] = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler} return components def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=0 ): if str(_lowerCamelCase ).startswith('''mps''' ): a :str = torch.manual_seed(_lowerCamelCase ) else: a :Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) a :Optional[Any] = { '''class_labels''': [1], '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = '''cpu''' a :int = self.get_dummy_components() a :Union[str, Any] = self.pipeline_class(**_lowerCamelCase ) pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) a :Tuple = self.get_dummy_inputs(_lowerCamelCase ) a :Optional[Any] = pipe(**_lowerCamelCase ).images a :int = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) a :Optional[Any] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) a :Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCamelCase , 1e-3 ) def SCREAMING_SNAKE_CASE__ ( self ): self._test_inference_batch_single_identical(relax_max_difference=_lowerCamelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE__ ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE__ ( self ): a :Any = torch.manual_seed(0 ) a :str = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) a :str = ['''vase''', '''umbrella''', '''white shark''', '''white wolf'''] a :Any = pipe.get_label_ids(_lowerCamelCase ) a :Tuple = pipe(_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=40 , output_type='''np''' ).images for word, image in zip(_lowerCamelCase , _lowerCamelCase ): a :Optional[Any] = load_numpy( F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def SCREAMING_SNAKE_CASE__ ( self ): a :Dict = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) a :Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) a :Optional[int] = ['''vase''', '''umbrella'''] a :int = pipe.get_label_ids(_lowerCamelCase ) a :Union[str, Any] = torch.manual_seed(0 ) a :List[str] = pipe(_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=25 , output_type='''np''' ).images for word, image in zip(_lowerCamelCase , _lowerCamelCase ): a :List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-1
94
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A =input('Enter image url: ').strip() print(f"""Downloading image from {url} ...""") A =BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image A =soup.find('meta', {'property': 'og:image'})['content'] A =requests.get(image_url).content A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(f"""Done. Image saved to disk as {file_name}.""")
34
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : str = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( UpperCamelCase__): _lowercase : List[Any] = """megatron-bert""" def __init__( self , lowerCAmelCase__=2_9_0_5_6 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=2_4 , lowerCAmelCase__=1_6 , lowerCAmelCase__=4_0_9_6 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> int: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) a__ : Optional[int] =vocab_size a__ : Any =hidden_size a__ : List[Any] =num_hidden_layers a__ : Optional[int] =num_attention_heads a__ : List[str] =hidden_act a__ : Union[str, Any] =intermediate_size a__ : Optional[int] =hidden_dropout_prob a__ : Tuple =attention_probs_dropout_prob a__ : List[str] =max_position_embeddings a__ : Optional[int] =type_vocab_size a__ : int =initializer_range a__ : Any =layer_norm_eps a__ : List[Any] =position_embedding_type a__ : Dict =use_cache
95
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _a ( __a ): __a : str = ["""vqvae"""] def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ): '''simple docstring''' super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' return 50 if isinstance(self.scheduler , lowercase ) else 1_000 @torch.no_grad() def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ): '''simple docstring''' UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase , device=self.device , ) UpperCAmelCase = noise UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase , lowercase ) UpperCAmelCase = self.mel.audio_slice_to_image(lowercase ) UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase = (input_image / 255) * 2 - 1 UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample( generator=lowercase )[0] UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase = int(mask_start_secs * pixels_per_second ) UpperCAmelCase = int(mask_end_secs * pixels_per_second ) UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase ): UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample'''] else: UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] if isinstance(self.scheduler , lowercase ): UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample'''] else: UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase = self.vqvae.decode(lowercase )['''sample'''] UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase = (images * 255).round().astype('''uint8''' ) UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) ) @torch.no_grad() def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler , lowercase ) self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase = (sample / 255) * 2 - 1 UpperCAmelCase = torch.Tensor(lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase = self.scheduler.alphas_cumprod[t] UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase = 1 - alpha_prod_t UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ): '''simple docstring''' UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
34
0
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ): _lowerCamelCase : List[str] = parent _lowerCamelCase : Dict = batch_size _lowerCamelCase : Optional[Any] = image_size _lowerCamelCase : int = num_channels _lowerCamelCase : int = embeddings_size _lowerCamelCase : Optional[Any] = hidden_sizes _lowerCamelCase : Tuple = depths _lowerCamelCase : Optional[Any] = is_training _lowerCamelCase : Optional[Any] = use_labels _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Tuple = num_labels _lowerCamelCase : List[str] = scope _lowerCamelCase : Union[str, Any] = len(lowercase ) def A_ ( self ): _lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : Dict = None if self.use_labels: _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def A_ ( self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = TFRegNetModel(config=lowercase ) _lowerCamelCase : Any = model(lowercase , training=lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : str = self.num_labels _lowerCamelCase : int = TFRegNetForImageClassification(lowercase ) _lowerCamelCase : List[str] = model(lowercase , labels=lowercase , training=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self ): _lowerCamelCase : Any = self.prepare_config_and_inputs() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = config_and_inputs _lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : Union[str, Any] = TFRegNetModelTester(self ) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase ) def A_ ( self ): return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def A_ ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def A_ ( self ): super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def A_ ( self ): pass def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Union[str, Any] = model_class(lowercase ) _lowerCamelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Dict = [*signature.parameters.keys()] _lowerCamelCase : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def A_ ( self ): _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def A_ ( self ): def check_hidden_states_output(lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = model_class(lowercase ) _lowerCamelCase : List[Any] = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase ) _lowerCamelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCamelCase : str = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCamelCase : Any = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCamelCase : Tuple = layer_type _lowerCamelCase : int = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCamelCase : str = True check_hidden_states_output(lowercase , lowercase , lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): _lowerCamelCase : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ) _lowerCamelCase : int = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowercase , lowercase ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(lowercase ) _lowerCamelCase : Any = self._prepare_for_class(lowercase , lowercase ) _lowerCamelCase : List[Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) _lowerCamelCase : Any = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) _lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) _lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase ) _lowerCamelCase : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) _lowerCamelCase : Optional[Any] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) _lowerCamelCase : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def A_ ( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Dict = TFRegNetModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def _snake_case ( ): _lowerCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def A_ ( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A_ ( self ): _lowerCamelCase : Optional[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowerCamelCase : Optional[int] = self.default_image_processor _lowerCamelCase : Optional[int] = prepare_img() _lowerCamelCase : Optional[Any] = image_processor(images=lowercase , return_tensors='tf' ) # forward pass _lowerCamelCase : Dict = model(**lowercase , training=lowercase ) # verify the logits _lowerCamelCase : Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase ) _lowerCamelCase : str = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
96
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal A =logging.get_logger(__name__) A =TypeVar('DatasetType', Dataset, IterableDataset) def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) else: return _interleave_iterable_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a ) else: return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
34
0
'''simple docstring''' from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class lowercase ( A__ ): """simple docstring""" _a = CustomTokenizer pass
97
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def snake_case_ (_a : Tuple ): return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def snake_case_ (): UpperCAmelCase = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a ) UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_a ) EnvironmentCommand.register_subcommand(_a ) TestCommand.register_subcommand(_a ) RunBeamCommand.register_subcommand(_a ) DummyDataCommand.register_subcommand(_a ) # Parse args UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if not hasattr(_a , '''func''' ): parser.print_help() exit(1 ) UpperCAmelCase = parse_unknown_args(_a ) # Run UpperCAmelCase = args.func(_a , **_a ) service.run() if __name__ == "__main__": main()
34
0
"""simple docstring""" import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1_0_2_4 ): UpperCAmelCase__ , UpperCAmelCase__ = [], [] UpperCAmelCase__ = list(zip(lowerCamelCase , lowerCamelCase ) ) UpperCAmelCase__ , UpperCAmelCase__ = sorted_examples[0] def is_too_big(lowerCamelCase ): return tok(lowerCamelCase , return_tensors='pt' ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): UpperCAmelCase__ = new_src + ' ' + src UpperCAmelCase__ = new_tgt + ' ' + tgt if is_too_big(lowerCamelCase ) or is_too_big(lowerCamelCase ): # cant fit, finalize example finished_src.append(lowerCamelCase ) finished_tgt.append(lowerCamelCase ) UpperCAmelCase__ , UpperCAmelCase__ = src, tgt else: # can fit, keep adding UpperCAmelCase__ , UpperCAmelCase__ = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(lowerCamelCase ) finished_tgt.append(lowerCamelCase ) return finished_src, finished_tgt def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = Path(lowerCamelCase ) save_path.mkdir(exist_ok=lowerCamelCase ) for split in ["train"]: UpperCAmelCase__ , UpperCAmelCase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target''' UpperCAmelCase__ = [x.rstrip() for x in Path(lowerCamelCase ).open().readlines()] UpperCAmelCase__ = [x.rstrip() for x in Path(lowerCamelCase ).open().readlines()] UpperCAmelCase__ , UpperCAmelCase__ = pack_examples(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) print(f'''packed {split} split from {len(lowerCamelCase )} examples -> {len(lowerCamelCase )}.''' ) Path(save_path / f'''{split}.source''' ).open('w' ).write('\n'.join(lowerCamelCase ) ) Path(save_path / f'''{split}.target''' ).open('w' ).write('\n'.join(lowerCamelCase ) ) for split in ["val", "test"]: UpperCAmelCase__ , UpperCAmelCase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target''' shutil.copyfile(lowerCamelCase , save_path / f'''{split}.source''' ) shutil.copyfile(lowerCamelCase , save_path / f'''{split}.target''' ) def a_ ( ): UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument('--tok_name' , type=lowerCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('--max_seq_len' , type=lowerCamelCase , default=1_2_8 ) parser.add_argument('--data_dir' , type=lowerCamelCase ) parser.add_argument('--save_path' , type=lowerCamelCase ) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
98
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow A =[ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) A =logging.getLogger() def snake_case_ (): UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase = parser.parse_args() return args.f def snake_case_ (_a : List[str] , _a : Union[str, Any]="eval" ): UpperCAmelCase = os.path.join(_a , F"{split}_results.json" ) if os.path.exists(_a ): with open(_a , '''r''' ) as f: return json.load(_a ) raise ValueError(F"can't find {path}" ) A =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( __a ): def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_glue.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_clm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_summarization_flax.main() UpperCAmelCase = get_results(lowercase , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_ta_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_ner.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_qa.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
34
0
def A_ ( A__ , A__ , A__ ) -> float: if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(A__ , A__ ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate a__ : Union[str, Any] = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly a__ : Optional[int] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
99
'''simple docstring''' class _a : def __init__( self : Any ): '''simple docstring''' UpperCAmelCase = {} # Mapping from char to TrieNode UpperCAmelCase = False def A ( self : int , lowercase : list[str] ): '''simple docstring''' for word in words: self.insert(lowercase ) def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: UpperCAmelCase = TrieNode() UpperCAmelCase = curr.nodes[char] UpperCAmelCase = True def A ( self : Optional[int] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self for char in word: if char not in curr.nodes: return False UpperCAmelCase = curr.nodes[char] return curr.is_leaf def A ( self : str , lowercase : str ): '''simple docstring''' def _delete(lowercase : TrieNode , lowercase : str , lowercase : int ) -> bool: if index == len(lowercase ): # If word does not exist if not curr.is_leaf: return False UpperCAmelCase = False return len(curr.nodes ) == 0 UpperCAmelCase = word[index] UpperCAmelCase = curr.nodes.get(lowercase ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted UpperCAmelCase = _delete(lowercase , lowercase , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , lowercase , 0 ) def snake_case_ (_a : TrieNode , _a : str ): if node.is_leaf: print(_a , end=''' ''' ) for key, value in node.nodes.items(): print_words(_a , word + key ) def snake_case_ (): UpperCAmelCase = '''banana bananas bandana band apple all beast'''.split() UpperCAmelCase = TrieNode() root.insert_many(_a ) # print_words(root, "") assert all(root.find(_a ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def snake_case_ (_a : str , _a : bool ): print(str(_a ) , '''works!''' if passes else '''doesn\'t work :(''' ) def snake_case_ (): assert test_trie() def snake_case_ (): print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
34
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Dict = '''xlm-roberta-xl''' def __init__( self , lowerCAmelCase__=2_5_0_8_8_0 , lowerCAmelCase__=2_5_6_0 , lowerCAmelCase__=3_6 , lowerCAmelCase__=3_2 , lowerCAmelCase__=1_0_2_4_0 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_4 , lowerCAmelCase__=1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-05 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ): super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" @property def snake_case_ ( self): if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
100
'''simple docstring''' import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging A =logging.get_logger(__name__) A ={ 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _a ( __a ): __a : Union[str, Any] = """encodec""" def __init__( self : Tuple , lowercase : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase : Any=24_000 , lowercase : str=1 , lowercase : Optional[int]=False , lowercase : Optional[Any]=None , lowercase : str=None , lowercase : Tuple=128 , lowercase : Union[str, Any]=32 , lowercase : Union[str, Any]=1 , lowercase : Optional[Any]=[8, 5, 4, 2] , lowercase : Any="weight_norm" , lowercase : Tuple=7 , lowercase : int=7 , lowercase : Dict=3 , lowercase : List[Any]=2 , lowercase : str=True , lowercase : List[str]="reflect" , lowercase : List[Any]=2 , lowercase : Optional[Any]=2 , lowercase : int=1.0 , lowercase : Dict=1_024 , lowercase : str=None , lowercase : Union[str, Any]=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = target_bandwidths UpperCAmelCase = sampling_rate UpperCAmelCase = audio_channels UpperCAmelCase = normalize UpperCAmelCase = chunk_length_s UpperCAmelCase = overlap UpperCAmelCase = hidden_size UpperCAmelCase = num_filters UpperCAmelCase = num_residual_layers UpperCAmelCase = upsampling_ratios UpperCAmelCase = norm_type UpperCAmelCase = kernel_size UpperCAmelCase = last_kernel_size UpperCAmelCase = residual_kernel_size UpperCAmelCase = dilation_growth_rate UpperCAmelCase = use_causal_conv UpperCAmelCase = pad_mode UpperCAmelCase = compress UpperCAmelCase = num_lstm_layers UpperCAmelCase = trim_right_ratio UpperCAmelCase = codebook_size UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size UpperCAmelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}" ) super().__init__(**lowercase ) @property def A ( self : Dict ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def A ( self : Union[str, Any] ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def A ( self : Optional[int] ): '''simple docstring''' return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
34
0
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowercase = checkpoint lowercase = {} lowercase = vae_state_dict['''encoder.conv_in.weight'''] lowercase = vae_state_dict['''encoder.conv_in.bias'''] lowercase = vae_state_dict['''encoder.conv_out.weight'''] lowercase = vae_state_dict['''encoder.conv_out.bias'''] lowercase = vae_state_dict['''encoder.norm_out.weight'''] lowercase = vae_state_dict['''encoder.norm_out.bias'''] lowercase = vae_state_dict['''decoder.conv_in.weight'''] lowercase = vae_state_dict['''decoder.conv_in.bias'''] lowercase = vae_state_dict['''decoder.conv_out.weight'''] lowercase = vae_state_dict['''decoder.conv_out.bias'''] lowercase = vae_state_dict['''decoder.norm_out.weight'''] lowercase = vae_state_dict['''decoder.norm_out.bias'''] lowercase = vae_state_dict['''quant_conv.weight'''] lowercase = vae_state_dict['''quant_conv.bias'''] lowercase = vae_state_dict['''post_quant_conv.weight'''] lowercase = vae_state_dict['''post_quant_conv.bias'''] # Retrieves the keys for the encoder down blocks only lowercase = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} ) lowercase = { layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(lowerCAmelCase__ ) } # Retrieves the keys for the decoder up blocks only lowercase = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} ) lowercase = { layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(lowerCAmelCase__ ) } for i in range(lowerCAmelCase__ ): lowercase = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key] if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict: lowercase = vae_state_dict.pop( f'encoder.down.{i}.downsample.conv.weight' ) lowercase = vae_state_dict.pop( f'encoder.down.{i}.downsample.conv.bias' ) lowercase = renew_vae_resnet_paths(lowerCAmelCase__ ) lowercase = {'''old''': f'down.{i}.block', '''new''': f'down_blocks.{i}.resnets'} assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ ) lowercase = [key for key in vae_state_dict if '''encoder.mid.block''' in key] lowercase = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowercase = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key] lowercase = renew_vae_resnet_paths(lowerCAmelCase__ ) lowercase = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ ) lowercase = [key for key in vae_state_dict if '''encoder.mid.attn''' in key] lowercase = renew_vae_attention_paths(lowerCAmelCase__ ) lowercase = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ ) conv_attn_to_linear(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ ): lowercase = num_up_blocks - 1 - i lowercase = [ key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key ] if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict: lowercase = vae_state_dict[ f'decoder.up.{block_id}.upsample.conv.weight' ] lowercase = vae_state_dict[ f'decoder.up.{block_id}.upsample.conv.bias' ] lowercase = renew_vae_resnet_paths(lowerCAmelCase__ ) lowercase = {'''old''': f'up.{block_id}.block', '''new''': f'up_blocks.{i}.resnets'} assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ ) lowercase = [key for key in vae_state_dict if '''decoder.mid.block''' in key] lowercase = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowercase = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key] lowercase = renew_vae_resnet_paths(lowerCAmelCase__ ) lowercase = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'} assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ ) lowercase = [key for key in vae_state_dict if '''decoder.mid.attn''' in key] lowercase = renew_vae_attention_paths(lowerCAmelCase__ ) lowercase = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , additional_replacements=[meta_path] , config=lowerCAmelCase__ ) conv_attn_to_linear(lowerCAmelCase__ ) return new_checkpoint def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , ): '''simple docstring''' # Only support V1 lowercase = requests.get( ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' ) lowercase = io.BytesIO(r.content ) lowercase = OmegaConf.load(lowerCAmelCase__ ) lowercase = 512 lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu''' if checkpoint_path.endswith('''safetensors''' ): from safetensors import safe_open lowercase = {} with safe_open(lowerCAmelCase__ , framework='''pt''' , device='''cpu''' ) as f: for key in f.keys(): lowercase = f.get_tensor(lowerCAmelCase__ ) else: lowercase = torch.load(lowerCAmelCase__ , map_location=lowerCAmelCase__ )['''state_dict'''] # Convert the VAE model. lowercase = create_vae_diffusers_config(lowerCAmelCase__ , image_size=lowerCAmelCase__ ) lowercase = custom_convert_ldm_vae_checkpoint(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = AutoencoderKL(**lowerCAmelCase__ ) vae.load_state_dict(lowerCAmelCase__ ) vae.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": lowercase__ :List[str] = argparse.ArgumentParser() parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.") lowercase__ :int = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
101
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch A =logging.get_logger(__name__) class _a ( __a ): __a : str = ["""pixel_values"""] def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' super().__init__(**lowercase ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ): '''simple docstring''' UpperCAmelCase = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" ) return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase ) def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ): '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ): '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ): '''simple docstring''' UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' ) UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(lowercase ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images] UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=lowercase , tensor_type=lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ): '''simple docstring''' UpperCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase ) != len(lowercase ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowercase ): UpperCAmelCase = target_sizes.numpy() UpperCAmelCase = [] for idx in range(len(lowercase ) ): UpperCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase ) UpperCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase ) else: UpperCAmelCase = logits.argmax(dim=1 ) UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
34
0
"""simple docstring""" from statistics import mean import numpy as np def lowercase ( _snake_case : list , _snake_case : list , _snake_case : list , _snake_case : int ) ->list: """simple docstring""" __snake_case : List[str] = 0 # Number of processes finished __snake_case : List[str] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. __snake_case : Optional[int] = [0] * no_of_process # List to include calculation results __snake_case : Tuple = [0] * no_of_process # Sort by arrival time. __snake_case : int = [burst_time[i] for i in np.argsort(_snake_case )] __snake_case : str = [process_name[i] for i in np.argsort(_snake_case )] arrival_time.sort() while no_of_process > finished_process_count: __snake_case : List[str] = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: __snake_case : List[Any] = arrival_time[i] __snake_case : Union[str, Any] = 0 # Index showing the location of the process being performed __snake_case : Dict = 0 # Saves the current response ratio. __snake_case : List[Any] = 0 for i in range(0 , _snake_case ): if finished_process[i] == 0 and arrival_time[i] <= current_time: __snake_case : int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: __snake_case : List[str] = temp __snake_case : Any = i # Calculate the turn around time __snake_case : int = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. __snake_case : Union[str, Any] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase ( _snake_case : list , _snake_case : list , _snake_case : list , _snake_case : int ) ->list: """simple docstring""" __snake_case : Optional[int] = [0] * no_of_process for i in range(0 , _snake_case ): __snake_case : int = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = 5 SCREAMING_SNAKE_CASE : Dict = ["""A""", """B""", """C""", """D""", """E"""] SCREAMING_SNAKE_CASE : List[str] = [1, 2, 3, 4, 5] SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5] SCREAMING_SNAKE_CASE : List[Any] = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) SCREAMING_SNAKE_CASE : Union[str, Any] = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""") for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
102
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A =logging.getLogger(__name__) def snake_case_ (_a : Dict , _a : Union[str, Any] ): return (preds == labels).mean() @dataclass class _a : __a : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __a : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class _a : __a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) __a : str = field(metadata={"""help""": """Should contain the data files for the task."""} ) __a : int = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a : bool = field( default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def snake_case_ (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _a ) # Set seed set_seed(training_args.seed ) try: UpperCAmelCase = processors[data_args.task_name]() UpperCAmelCase = processor.get_labels() UpperCAmelCase = len(_a ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , ) # Get datasets UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCAmelCase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(_a : EvalPrediction ) -> Dict: UpperCAmelCase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(_a , p.label_ids )} # Data collator UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCAmelCase = Trainer( model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase = trainer.evaluate() UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_master(): with open(_a , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , _a , _a ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_a ) return results def snake_case_ (_a : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
34
0
class __snake_case : def __init__( self : str , A_ : Dict , A_ : Optional[int]): lowerCAmelCase_ : str = name lowerCAmelCase_ : List[Any] = val def __str__( self : List[str]): return F"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : List[str] , A_ : Tuple): return self.val < other.val class __snake_case : def __init__( self : int , A_ : Dict): lowerCAmelCase_ : List[str] = {} lowerCAmelCase_ : Any = {} lowerCAmelCase_ : Dict = self.build_heap(A_) def __getitem__( self : List[str] , A_ : List[Any]): return self.get_value(A_) def UpperCAmelCase__ ( self : List[Any] , A_ : str): return (idx - 1) // 2 def UpperCAmelCase__ ( self : int , A_ : List[str]): return idx * 2 + 1 def UpperCAmelCase__ ( self : str , A_ : List[Any]): return idx * 2 + 2 def UpperCAmelCase__ ( self : Optional[Any] , A_ : Tuple): return self.heap_dict[key] def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tuple): lowerCAmelCase_ : Dict = len(A_) - 1 lowerCAmelCase_ : List[Any] = self.get_parent_idx(A_) for idx, i in enumerate(A_): lowerCAmelCase_ : List[str] = idx lowerCAmelCase_ : Any = i.val for i in range(A_ , -1 , -1): self.sift_down(A_ , A_) return array def UpperCAmelCase__ ( self : List[str] , A_ : int , A_ : Tuple): while True: lowerCAmelCase_ : Union[str, Any] = self.get_left_child_idx(A_) # noqa: E741 lowerCAmelCase_ : int = self.get_right_child_idx(A_) lowerCAmelCase_ : int = idx if l < len(A_) and array[l] < array[idx]: lowerCAmelCase_ : Any = l if r < len(A_) and array[r] < array[smallest]: lowerCAmelCase_ : List[str] = r if smallest != idx: lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = array[smallest], array[idx] ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) : List[Any] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowerCAmelCase_ : Union[str, Any] = smallest else: break def UpperCAmelCase__ ( self : Tuple , A_ : List[str]): lowerCAmelCase_ : Any = self.get_parent_idx(A_) while p >= 0 and self.heap[p] > self.heap[idx]: lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.heap[idx], self.heap[p] lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowerCAmelCase_ : int = p lowerCAmelCase_ : Tuple = self.get_parent_idx(A_) def UpperCAmelCase__ ( self : Optional[Any]): return self.heap[0] def UpperCAmelCase__ ( self : Tuple): lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.heap[-1], self.heap[0] lowerCAmelCase_ , lowerCAmelCase_ : Any = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowerCAmelCase_ : int = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap) return x def UpperCAmelCase__ ( self : Any , A_ : Any): self.heap.append(A_) lowerCAmelCase_ : Optional[int] = len(self.heap) - 1 lowerCAmelCase_ : Optional[int] = node.val self.sift_up(len(self.heap) - 1) def UpperCAmelCase__ ( self : List[Any]): return len(self.heap) == 0 def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[Any] , A_ : int): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowerCAmelCase_ : Tuple = new_value lowerCAmelCase_ : Tuple = new_value self.sift_up(self.idx_of_element[node]) A__ : List[str] = Node('''R''', -1) A__ : Optional[Any] = Node('''B''', 6) A__ : List[str] = Node('''A''', 3) A__ : Optional[Any] = Node('''X''', 1) A__ : Tuple = Node('''E''', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array A__ : Optional[Any] = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('''Min Heap - before decrease key''') for i in my_min_heap.heap: print(i) print('''Min Heap - After decrease key of node [B -> -17]''') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
103
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _a ( __a ): __a : int = ["""image_processor""", """tokenizer"""] __a : Union[str, Any] = """ChineseCLIPImageProcessor""" __a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowercase , ) UpperCAmelCase = kwargs.pop('''feature_extractor''' ) UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowercase , lowercase ) UpperCAmelCase = self.image_processor def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ): '''simple docstring''' if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase ) if images is not None: UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase ) if text is not None and images is not None: UpperCAmelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase ) def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*lowercase , **lowercase ) @property def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.tokenizer.model_input_names UpperCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self : List[Any] ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , ) return self.image_processor_class
34
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = 'bit' SCREAMING_SNAKE_CASE : Union[str, Any] = ['preactivation', 'bottleneck'] SCREAMING_SNAKE_CASE : str = ['SAME', 'VALID'] def __init__( self : Optional[Any] ,lowercase__ : Any=3 ,lowercase__ : Tuple=6_4 ,lowercase__ : List[str]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] ,lowercase__ : Any=[3, 4, 6, 3] ,lowercase__ : str="preactivation" ,lowercase__ : Dict="relu" ,lowercase__ : Optional[int]=None ,lowercase__ : str=3_2 ,lowercase__ : int=0.0 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Any=1 ,lowercase__ : Any=None ,lowercase__ : Dict=None ,**lowercase__ : Union[str, Any] ,): super().__init__(**lowercase__ ) if layer_type not in self.layer_types: raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: __lowercase = global_padding.upper() else: raise ValueError(F"Padding strategy {global_padding} not supported" ) __lowercase = num_channels __lowercase = embedding_size __lowercase = hidden_sizes __lowercase = depths __lowercase = layer_type __lowercase = hidden_act __lowercase = global_padding __lowercase = num_groups __lowercase = drop_path_rate __lowercase = embedding_dynamic_padding __lowercase = output_stride __lowercase = width_factor __lowercase = ['''stem'''] + [F"stage{idx}" for idx in range(1 ,len(lowercase__ ) + 1 )] __lowercase , __lowercase = get_aligned_output_features_output_indices( out_features=lowercase__ ,out_indices=lowercase__ ,stage_names=self.stage_names )
104
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging A =logging.get_logger(__name__) A ={ 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class _a ( __a ): __a : List[Any] = """marian""" __a : Union[str, Any] = ["""past_key_values"""] __a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : List[Any] , lowercase : Union[str, Any]=58_101 , lowercase : Tuple=None , lowercase : str=1_024 , lowercase : Optional[int]=12 , lowercase : Optional[int]=4_096 , lowercase : int=16 , lowercase : List[Any]=12 , lowercase : int=4_096 , lowercase : Optional[int]=16 , lowercase : int=0.0 , lowercase : Tuple=0.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=True , lowercase : List[Any]="gelu" , lowercase : Tuple=1_024 , lowercase : str=0.1 , lowercase : str=0.0 , lowercase : Optional[int]=0.0 , lowercase : Dict=0.02 , lowercase : Union[str, Any]=58_100 , lowercase : List[str]=False , lowercase : str=58_100 , lowercase : Any=0 , lowercase : Optional[Any]=0 , lowercase : Tuple=True , **lowercase : Optional[int] , ): '''simple docstring''' UpperCAmelCase = vocab_size UpperCAmelCase = decoder_vocab_size or vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , ) class _a ( __a ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def A ( self : int ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase = {0: '''batch'''} UpperCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowercase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} else: UpperCAmelCase = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def A ( self : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super().outputs else: UpperCAmelCase = super(lowercase , self ).outputs if self.use_past: UpperCAmelCase , UpperCAmelCase = self.num_layers for i in range(lowercase ): UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} UpperCAmelCase = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def A ( self : Dict , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) # Generate decoder inputs UpperCAmelCase = seq_length if not self.use_past else 1 UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) UpperCAmelCase = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} UpperCAmelCase = dict(**lowercase , **lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape UpperCAmelCase = common_inputs['''decoder_input_ids'''].shape[1] UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = decoder_seq_length + 3 UpperCAmelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCAmelCase = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowercase , lowercase )] , dim=1 ) UpperCAmelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase = min(lowercase , lowercase ) UpperCAmelCase = max(lowercase , lowercase ) - min_num_layers UpperCAmelCase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowercase ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), torch.zeros(lowercase ), ) ) # TODO: test this. UpperCAmelCase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowercase , lowercase ): common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) ) return common_inputs def A ( self : int , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase , lowercase , lowercase , lowercase , lowercase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values UpperCAmelCase = seqlen + 2 UpperCAmelCase , UpperCAmelCase = self.num_layers UpperCAmelCase , UpperCAmelCase = self.num_attention_heads UpperCAmelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCAmelCase = common_inputs['''attention_mask'''].dtype UpperCAmelCase = torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) UpperCAmelCase = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase ) ] return common_inputs def A ( self : str , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCAmelCase = tokenizer.num_special_tokens_to_add(lowercase ) UpperCAmelCase = compute_effective_axis_dimension( lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase ) # Generate dummy inputs according to compute batch and sequence UpperCAmelCase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCAmelCase = dict(tokenizer(lowercase , return_tensors=lowercase ) ) return common_inputs def A ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) else: UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) return common_inputs def A ( self : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Any , lowercase : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase ) else: UpperCAmelCase = super(lowercase , self )._flatten_past_key_values_( lowercase , lowercase , lowercase , lowercase ) @property def A ( self : Any ): '''simple docstring''' return 1E-4
34
0
"""simple docstring""" from __future__ import annotations a : List[str] = 10 def _SCREAMING_SNAKE_CASE ( _lowercase : list[int] ) ->list[int]: '''simple docstring''' a : Any = 1 a : List[Any] = max(_lowercase ) while placement <= max_digit: # declare and initialize empty buckets a : list[list] = [[] for _ in range(_lowercase )] # split list_of_ints between the buckets for i in list_of_ints: a : Dict = int((i / placement) % RADIX ) buckets[tmp].append(_lowercase ) # put each buckets' contents into list_of_ints a : Tuple = 0 for b in range(_lowercase ): for i in buckets[b]: a : List[Any] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
105
'''simple docstring''' import os def snake_case_ (): UpperCAmelCase = os.path.join(os.path.dirname(_a ) , '''num.txt''' ) with open(_a ) as file_hand: return str(sum(int(_a ) for line in file_hand ) )[:1_0] if __name__ == "__main__": print(solution())
34
0
"""simple docstring""" import math import random def __SCREAMING_SNAKE_CASE ( A_ , A_ = False ): if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __UpperCamelCase : Any = 0.0_2 def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Tuple = float(2 * (random.randint(1 , 1_00 )) - 1 ) for _ in range(A_ ): # Forward propagation lowerCAmelCase__ : Dict = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? lowerCAmelCase__ : List[Any] = (expected / 1_00) - layer_a # Error delta lowerCAmelCase__ : str = layer_1_error * sigmoid_function(A_ , A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase : List[str] = int(input('''Expected value: ''')) __UpperCamelCase : Optional[Any] = int(input('''Number of propagations: ''')) print(forward_propagation(expected, number_propagations))
106
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version A =logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') A ={ 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization A ={ 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } A =sorted(arg_to_scheduler.keys()) A ='{' + ', '.join(arg_to_scheduler_choices) + '}' class _a ( pl.LightningModule ): def __init__( self : List[str] , lowercase : argparse.Namespace , lowercase : List[Any]=None , lowercase : Dict="base" , lowercase : Optional[int]=None , lowercase : Dict=None , lowercase : Tuple=None , **lowercase : Optional[int] , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase ) UpperCAmelCase = 0 UpperCAmelCase = Path(self.hparams.output_dir ) UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: UpperCAmelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase , **lowercase , ) else: UpperCAmelCase = config UpperCAmelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase , lowercase ): assert hasattr(self.config , lowercase ), f"model config doesn't have a `{p}` attribute" setattr(self.config , lowercase , getattr(self.hparams , lowercase ) ) if tokenizer is None: UpperCAmelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase , ) else: UpperCAmelCase = tokenizer UpperCAmelCase = MODEL_MODES[mode] if model is None: UpperCAmelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase , ) else: UpperCAmelCase = model def A ( self : List[Any] , *lowercase : List[str] , **lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = self.model_type.from_pretrained(*lowercase , **lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler] UpperCAmelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) UpperCAmelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.model UpperCAmelCase = ['''bias''', '''LayerNorm.weight'''] UpperCAmelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: UpperCAmelCase = Adafactor( lowercase , lr=self.hparams.learning_rate , scale_parameter=lowercase , relative_step=lowercase ) else: UpperCAmelCase = AdamW( lowercase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) UpperCAmelCase = optimizer UpperCAmelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def A ( self : List[Any] , lowercase : int , lowercase : List[str] ): '''simple docstring''' return self.validation_step(lowercase , lowercase ) def A ( self : List[Any] , lowercase : Tuple ): '''simple docstring''' return self.validation_end(lowercase ) def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A ( self : List[str] , lowercase : Any ): '''simple docstring''' if stage == "test": UpperCAmelCase = len(self.test_dataloader().dataset ) else: UpperCAmelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase ) UpperCAmelCase = len(self.train_dataloader().dataset ) def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def A ( self : Union[str, Any] ): '''simple docstring''' return self.train_loader def A ( self : Optional[Any] ): '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : List[Any] ): '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase ) def A ( self : Any , lowercase : Union[str, Any] ): '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase , list(filter(lowercase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A ( self : List[str] , lowercase : Dict[str, Any] ): '''simple docstring''' UpperCAmelCase = self.output_dir.joinpath('''best_tfmr''' ) UpperCAmelCase = self.step_count self.model.save_pretrained(lowercase ) self.tokenizer.save_pretrained(lowercase ) @staticmethod def A ( lowercase : Optional[int] , lowercase : List[str] ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=lowercase , type=lowercase , required=lowercase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase , type=lowercase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase ).parent / '''test_run''' / '''cache''' ) , type=lowercase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase , metavar=lowercase , type=lowercase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class _a ( pl.Callback ): def A ( self : Dict , lowercase : Optional[Any] , lowercase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Any ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase ) class _a ( pl.Callback ): def A ( self : Optional[int] , lowercase : Optional[int] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = trainer.lr_schedulers[0]['''scheduler'''] UpperCAmelCase = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase ) def A ( self : Tuple , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log results for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def A ( self : Dict , lowercase : pl.Trainer , lowercase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) UpperCAmelCase = trainer.callback_metrics # Log and save results to file UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase , '''w''' ) as writer: for key in sorted(lowercase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase , str(metrics[key] ) ) ) def snake_case_ (_a : int , _a : Optional[Any] ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '''--output_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=_a , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=_a , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=_a ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=_a , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=_a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=_a , default=4_2 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=_a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , ) def snake_case_ (_a : BaseTransformer , _a : argparse.Namespace , _a : List[Any]=None , _a : Tuple=True , _a : int=[] , _a : Any=None , _a : int=None , **_a : Optional[Any] , ): pl.seed_everything(args.seed ) # init model UpperCAmelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_a ) # add custom checkpoints if checkpoint_callback is None: UpperCAmelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_a ) if logging_callback is None: UpperCAmelCase = LoggingCallback() UpperCAmelCase = {} if args.fpaa: UpperCAmelCase = 1_6 if args.gpus > 1: UpperCAmelCase = '''auto''' UpperCAmelCase = '''ddp''' UpperCAmelCase = args.accumulate_grad_batches UpperCAmelCase = None UpperCAmelCase = '''auto''' UpperCAmelCase = pl.Trainer.from_argparse_args( _a , weights_summary=_a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_a , val_check_interval=1 , num_sanity_val_steps=2 , **_a , ) if args.do_train: trainer.fit(_a ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
34
0
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class snake_case__ (pl.LightningModule ): """simple docstring""" def __init__( self : Any , __lowerCamelCase : Optional[int] ) -> List[str]: super().__init__() a = model a = 2 a = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __UpperCAmelCase ( self : str ) -> int: pass def __magic_name__ ( A : str, A : str, A : str ): '''simple docstring''' a = LongformerModel.from_pretrained(A ) a = LightningModel(A ) a = torch.load(A, map_location=torch.device("cpu" ) ) lightning_model.load_state_dict(ckpt["state_dict"] ) # init longformer question answering model a = LongformerForQuestionAnswering.from_pretrained(A ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(A ) print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--longformer_model', default=None, type=str, required=True, help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.', ) parser.add_argument( '--longformer_question_answering_ckpt_path', default=None, type=str, required=True, help='Path the official PyTorch Lightning Checkpoint.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCAmelCase : List[str] = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
107
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def snake_case_ (_a : dict , _a : str , _a : set , _a : set , _a : dict , _a : dict , _a : PriorityQueue , _a : dict , _a : float | int , ): for nxt, d in graph[v]: if nxt in visited_forward: continue UpperCAmelCase = cst_fwd.get(_a , np.inf ) UpperCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt) ) UpperCAmelCase = new_cost_f UpperCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def snake_case_ (_a : str , _a : str , _a : dict , _a : dict ): UpperCAmelCase = -1 UpperCAmelCase = set() UpperCAmelCase = set() UpperCAmelCase = {source: 0} UpperCAmelCase = {destination: 0} UpperCAmelCase = {source: None} UpperCAmelCase = {destination: None} UpperCAmelCase = PriorityQueue() UpperCAmelCase = PriorityQueue() UpperCAmelCase = np.inf queue_forward.put((0, source) ) queue_backward.put((0, destination) ) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): UpperCAmelCase , UpperCAmelCase = queue_forward.get() visited_forward.add(_a ) UpperCAmelCase , UpperCAmelCase = queue_backward.get() visited_backward.add(_a ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) UpperCAmelCase = pass_and_relaxation( _a , _a , _a , _a , _a , _a , _a , _a , _a , ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: UpperCAmelCase = shortest_distance return shortest_path_distance A ={ 'B': [['C', 1]], 'C': [['D', 1]], 'D': [['F', 1]], 'E': [['B', 1], ['G', 2]], 'F': [], 'G': [['F', 1]], } A ={ 'B': [['E', 1]], 'C': [['B', 1]], 'D': [['C', 1]], 'F': [['D', 1], ['G', 1]], 'E': [[None, np.inf]], 'G': [['E', 2]], } if __name__ == "__main__": import doctest doctest.testmod()
34
0
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : Tuple ="ssube/stable-diffusion-x4-upscaler-onnx" def lowercase__ ( self , snake_case__=0 ): """simple docstring""" lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case__ ) ) lowerCAmelCase : List[str] = torch.manual_seed(snake_case__ ) lowerCAmelCase : Tuple = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : int = self.get_dummy_inputs() lowerCAmelCase : Optional[Any] = pipe(**snake_case__ ).images lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) lowerCAmelCase : Tuple = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : int = self.get_dummy_inputs() lowerCAmelCase : Any = pipe(**snake_case__ ).images lowerCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase : str = np.array( [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : int = self.get_dummy_inputs() lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase : Tuple = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : Optional[int] = self.get_dummy_inputs() lowerCAmelCase : List[str] = pipe(**snake_case__ ).images lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase : Union[str, Any] = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : Tuple = self.get_dummy_inputs() lowerCAmelCase : Union[str, Any] = pipe(**snake_case__ ).images lowerCAmelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase : int = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @property def lowercase__ ( self ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = ort.SessionOptions() lowerCAmelCase : Tuple = False return options def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCAmelCase : Optional[int] = init_image.resize((128, 128) ) # using the PNDM scheduler by default lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : List[str] = "A fantasy landscape, trending on artstation" lowerCAmelCase : str = torch.manual_seed(0 ) lowerCAmelCase : Any = pipe( prompt=snake_case__ , image=snake_case__ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type="np" , ) lowerCAmelCase : Tuple = output.images lowerCAmelCase : Any = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase : str = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) lowerCAmelCase : Tuple = init_image.resize((128, 128) ) lowerCAmelCase : Dict = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase : Tuple = "A fantasy landscape, trending on artstation" lowerCAmelCase : int = torch.manual_seed(0 ) lowerCAmelCase : Union[str, Any] = pipe( prompt=snake_case__ , image=snake_case__ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case__ , output_type="np" , ) lowerCAmelCase : Any = output.images lowerCAmelCase : Union[str, Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) lowerCAmelCase : List[str] = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
108
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) def snake_case_ (_a : List[str] ): UpperCAmelCase = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase = 1_2_8 elif "12-12" in model_name: UpperCAmelCase = 1_2 UpperCAmelCase = 1_2 elif "14-14" in model_name: UpperCAmelCase = 1_4 UpperCAmelCase = 1_4 elif "16-16" in model_name: UpperCAmelCase = 1_6 UpperCAmelCase = 1_6 else: raise ValueError('''Model not supported''' ) UpperCAmelCase = '''huggingface/label-files''' if "speech-commands" in model_name: UpperCAmelCase = 3_5 UpperCAmelCase = '''speech-commands-v2-id2label.json''' else: UpperCAmelCase = 5_2_7 UpperCAmelCase = '''audioset-id2label.json''' UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (_a : Tuple ): if "module.v" in name: UpperCAmelCase = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: UpperCAmelCase = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: UpperCAmelCase = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def snake_case_ (_a : Dict , _a : List[Any] ): for key in orig_state_dict.copy().keys(): UpperCAmelCase = orig_state_dict.pop(_a ) if "qkv" in key: UpperCAmelCase = key.split('''.''' ) UpperCAmelCase = int(key_split[3] ) UpperCAmelCase = config.hidden_size if "weight" in key: UpperCAmelCase = val[:dim, :] UpperCAmelCase = val[dim : dim * 2, :] UpperCAmelCase = val[-dim:, :] else: UpperCAmelCase = val[:dim] UpperCAmelCase = val[dim : dim * 2] UpperCAmelCase = val[-dim:] else: UpperCAmelCase = val return orig_state_dict def snake_case_ (_a : Tuple ): UpperCAmelCase = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(_a , _a ) @torch.no_grad() def snake_case_ (_a : int , _a : Union[str, Any] , _a : Dict=False ): UpperCAmelCase = get_audio_spectrogram_transformer_config(_a ) UpperCAmelCase = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict UpperCAmelCase = model_name_to_url[model_name] UpperCAmelCase = torch.hub.load_state_dict_from_url(_a , map_location='''cpu''' ) # remove some keys remove_keys(_a ) # rename some keys UpperCAmelCase = convert_state_dict(_a , _a ) # load 🤗 model UpperCAmelCase = ASTForAudioClassification(_a ) model.eval() model.load_state_dict(_a ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 UpperCAmelCase = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 UpperCAmelCase = 1_0_2_4 if '''speech-commands''' not in model_name else 1_2_8 UpperCAmelCase = ASTFeatureExtractor(mean=_a , std=_a , max_length=_a ) if "speech-commands" in model_name: UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) UpperCAmelCase = dataset[0]['''audio''']['''array'''] else: UpperCAmelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) UpperCAmelCase , UpperCAmelCase = torchaudio.load(_a ) UpperCAmelCase = waveform.squeeze().numpy() UpperCAmelCase = feature_extractor(_a , sampling_rate=1_6_0_0_0 , return_tensors='''pt''' ) # forward pass UpperCAmelCase = model(**_a ) UpperCAmelCase = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , _a , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(_a ).mkdir(exist_ok=_a ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_a ) print(F"Saving feature extractor to {pytorch_dump_folder_path}" ) feature_extractor.save_pretrained(_a ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F"MIT/{model_name}" ) feature_extractor.push_to_hub(F"MIT/{model_name}" ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) A =parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
34
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A: str = logging.get_logger(__name__) A: List[str] = { "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ ): __lowerCAmelCase : List[Any] = 'focalnet' def __init__( self , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=96 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=[192, 384, 768, 768] , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[3, 3, 3, 3] , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1E-4 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Dict: '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = image_size UpperCAmelCase : Union[str, Any] = patch_size UpperCAmelCase : Any = num_channels UpperCAmelCase : Optional[int] = embed_dim UpperCAmelCase : List[str] = use_conv_embed UpperCAmelCase : Dict = hidden_sizes UpperCAmelCase : Any = depths UpperCAmelCase : str = focal_levels UpperCAmelCase : Tuple = focal_windows UpperCAmelCase : Tuple = hidden_act UpperCAmelCase : Dict = mlp_ratio UpperCAmelCase : List[Any] = hidden_dropout_prob UpperCAmelCase : Dict = drop_path_rate UpperCAmelCase : Tuple = use_layerscale UpperCAmelCase : Dict = layerscale_value UpperCAmelCase : Optional[int] = use_post_layernorm UpperCAmelCase : Dict = use_post_layernorm_in_modulation UpperCAmelCase : Tuple = normalize_modulator UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Union[str, Any] = layer_norm_eps UpperCAmelCase : List[Any] = encoder_stride UpperCAmelCase : Optional[Any] = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )] UpperCAmelCase , UpperCAmelCase : Any = get_aligned_output_features_output_indices( out_features=_SCREAMING_SNAKE_CASE , out_indices=_SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
109
'''simple docstring''' from __future__ import annotations def snake_case_ (_a : Dict , _a : str , _a : Optional[Any] , _a : List[str] ): # noqa: E741 while r - l > 1: UpperCAmelCase = (l + r) // 2 if v[m] >= key: UpperCAmelCase = m else: UpperCAmelCase = m # noqa: E741 return r def snake_case_ (_a : list[int] ): if len(_a ) == 0: return 0 UpperCAmelCase = [0] * len(_a ) UpperCAmelCase = 1 UpperCAmelCase = v[0] for i in range(1 , len(_a ) ): if v[i] < tail[0]: UpperCAmelCase = v[i] elif v[i] > tail[length - 1]: UpperCAmelCase = v[i] length += 1 else: UpperCAmelCase = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
34
0
import socket def _a ( ): """simple docstring""" lowercase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) lowercase__ = socket.gethostname() lowercase__ = 1_23_12 sock.connect((host, port) ) sock.send(B'''Hello server!''' ) with open('''Received_file''' , '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: lowercase__ = sock.recv(10_24 ) if not data: break out_file.write(SCREAMING_SNAKE_CASE ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
110
'''simple docstring''' def snake_case_ (_a : str , _a : str ): UpperCAmelCase = len(_a ) + 1 UpperCAmelCase = len(_a ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )] # since string of zero length match pattern of zero length UpperCAmelCase = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , _a ): UpperCAmelCase = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , _a ): UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , _a ): for j in range(1 , _a ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": UpperCAmelCase = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: UpperCAmelCase = 1 elif pattern[j - 2] in (input_string[i - 1], "."): UpperCAmelCase = dp[i - 1][j] else: UpperCAmelCase = 0 else: UpperCAmelCase = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") A ='aab' A ='c*a*b' # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f"""{input_string} matches the given pattern {pattern}""") else: print(f"""{input_string} does not match with the given pattern {pattern}""")
34
0
def lowercase_ ( _A : int , _A : int ): """simple docstring""" return 1 if input_a == input_a else 0 def lowercase_ ( ): """simple docstring""" assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
184
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): A ='pt' elif is_tf_available(): A ='tf' else: A ='jax' class _a ( __a , unittest.TestCase ): __a : Optional[Any] = PerceiverTokenizer __a : str = False def A ( self : Union[str, Any] ): '''simple docstring''' super().setUp() UpperCAmelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def A ( self : Optional[int] ): '''simple docstring''' return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def A ( self : Union[str, Any] , **lowercase : int ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase ) def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ): '''simple docstring''' UpperCAmelCase = [] for i in range(len(lowercase ) ): try: UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) ) UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) ) if max_length is not None and len(lowercase ) > max_length: UpperCAmelCase = toks[:max_length] if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0: while len(lowercase ) < min_length: UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase = [t[0] for t in toks] # Ensure consistency UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) if " " not in output_txt and len(lowercase ) > 1: UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase ) ) if with_prefix_space: UpperCAmelCase = ''' ''' + output_txt UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) return output_txt, output_ids def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = '''Unicode €.''' UpperCAmelCase = tokenizer(lowercase ) UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' ) UpperCAmelCase = tokenizer('''e è é ê ë''' ) UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , lowercase ) # decoding UpperCAmelCase = tokenizer.decode(lowercase ) self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) if FRAMEWORK != "jax": UpperCAmelCase = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase , lowercase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , lowercase ) self.assertIn('''attention_mask''' , lowercase ) self.assertNotIn('''decoder_input_ids''' , lowercase ) self.assertNotIn('''decoder_attention_mask''' , lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer UpperCAmelCase = [ '''Summary of the text.''', '''Another summary.''', ] UpperCAmelCase = tokenizer( text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) shutil.rmtree(lowercase ) UpperCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) UpperCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase ) tokenizer.save_pretrained(lowercase ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase ) UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowercase ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase ) with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: UpperCAmelCase = json.load(lowercase ) UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] UpperCAmelCase = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(lowercase , lowercase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )] UpperCAmelCase = tokenizer_class.from_pretrained( lowercase , additional_special_tokens=lowercase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def A ( self : Union[str, Any] ): '''simple docstring''' pass def A ( self : Any ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' pass def A ( self : str ): '''simple docstring''' pass def A ( self : List[str] ): '''simple docstring''' UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase ) self.assertIsInstance(lowercase , lowercase )
34
0
import re from ..utils import cached_file # docstyle-ignore _a = '''\nHuman: <<task>>\n\nAssistant: ''' _a = '''huggingface-tools/default-prompts''' _a = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''} def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="run" )-> Any: """simple docstring""" if prompt_or_repo_id is None: _UpperCAmelCase = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('\\s' , _a ) is not None: return prompt_or_repo_id _UpperCAmelCase = cached_file( _a , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} ) with open(_a , 'r' , encoding='utf-8' ) as f: return f.read()
39
'''simple docstring''' import os from distutils.util import strtobool def snake_case_ (_a : Union[str, Any] , _a : List[Any] ): for e in env_keys: UpperCAmelCase = int(os.environ.get(_a , -1 ) ) if val >= 0: return val return default def snake_case_ (_a : Dict , _a : Any=False ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return strtobool(_a ) == 1 # As its name indicates `strtobool` actually returns an int... def snake_case_ (_a : str , _a : Optional[Any]="no" ): UpperCAmelCase = os.environ.get(_a , str(_a ) ) return value
34
0
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _snake_case = { "169M": 12, "430M": 24, "1B5": 24, "3B": 32, "7B": 32, "14B": 40, } _snake_case = { "169M": 768, "430M": 1024, "1B5": 2048, "3B": 2560, "7B": 4096, "14B": 5120, } def lowerCAmelCase_ ( snake_case_ ): _A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: _A : Optional[int] = state_dict.pop(_a ) # emb -> embedding if name.startswith("""emb.""" ): _A : List[str] = name.replace("""emb.""","""embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): _A : List[str] = name.replace("""blocks.0.ln0""","""blocks.0.pre_ln""" ) # att -> attention _A : Dict = re.sub(r"""blocks\.(\d+)\.att""",r"""blocks.\1.attention""",_a ) # ffn -> feed_forward _A : List[Any] = re.sub(r"""blocks\.(\d+)\.ffn""",r"""blocks.\1.feed_forward""",_a ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): _A : List[str] = name.replace(""".time_mix_k""",""".time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): _A : Optional[Any] = name.replace(""".time_mix_v""",""".time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): _A : Union[str, Any] = name.replace(""".time_mix_r""",""".time_mix_receptance""" ) if name != "head.weight": _A : Optional[int] = """rwkv.""" + name _A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_=None,snake_case_=None,snake_case_=False,snake_case_=None ): # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) _A : Dict = 50277 _A : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: _A : int = PreTrainedTokenizerFast(tokenizer_file=_a ) _A : int = len(_a ) tokenizer.save_pretrained(_a ) # 2. Build the config _A : Any = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _A : Dict = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' ) _A : Optional[int] = RwkvConfig( vocab_size=_a,num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],hidden_size=HIDEN_SIZE_MAPPING[size],) config.save_pretrained(_a ) # 3. Download model file then convert state_dict _A : Any = hf_hub_download(_a,_a ) _A : Tuple = torch.load(_a,map_location="""cpu""" ) _A : Tuple = convert_state_dict(_a ) # 4. Split in shards and save _A , _A : Union[str, Any] = shard_checkpoint(_a ) for shard_file, shard in shards.items(): torch.save(_a,os.path.join(_a,_a ) ) if index is not None: _A : Tuple = os.path.join(_a,_a ) # Save the index as well with open(_a,"""w""",encoding="""utf-8""" ) as f: _A : Dict = json.dumps(_a,indent=2,sort_keys=_a ) + """\n""" f.write(_a ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.""" ) _A : Any = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _A : List[Any] = torch.load(os.path.join(_a,_a ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()},os.path.join(_a,_a ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) _A : List[Any] = AutoModelForCausalLM.from_pretrained(_a ) model.push_to_hub(_a,max_shard_size="""2GB""" ) tokenizer.push_to_hub(_a ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint." ) parser.add_argument( "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="Where to save the converted model." ) parser.add_argument( "--tokenizer_file", default=None, type=str, help="Path to the tokenizer file to use (if not provided, only the model is converted).", ) parser.add_argument( "--size", default=None, type=str, help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Push to the Hub the converted model.", ) parser.add_argument( "--model_name", default=None, type=str, help="Name of the pushed model on the Hub, including the username / organization.", ) _snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
26
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A =logging.getLogger(__name__) A ='Hello world! cécé herlolip' A =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def snake_case_ (_a : List[Any] , _a : Any ): UpperCAmelCase = BertAbsConfig( temp_dir='''.''' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) UpperCAmelCase = torch.load(_a , lambda _a , _a : storage ) UpperCAmelCase = AbsSummarizer(_a , torch.device('''cpu''' ) , _a ) original.eval() UpperCAmelCase = BertAbsSummarizer(_a , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(_a )) ) UpperCAmelCase = torch.tensor(_a ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass UpperCAmelCase = encoder_input_ids UpperCAmelCase = decoder_input_ids UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = UpperCAmelCase = None UpperCAmelCase = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical UpperCAmelCase = original(_a , _a , _a , _a , _a , _a , _a )[0] UpperCAmelCase = original.generator(_a ) UpperCAmelCase = new_model( _a , _a , _a , _a , _a )[0] UpperCAmelCase = new_model.generator(_a ) UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(_a ) ) UpperCAmelCase = torch.allclose(_a , _a , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": A =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) A =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
34
0
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCAmelCase_ : List[str] = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowerCAmelCase_ : Union[str, Any] = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowerCAmelCase_ : Optional[int] = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE (datasets.Metric ): """simple docstring""" def UpperCamelCase__ ( self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def UpperCamelCase__ ( self : List[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a ) }
63
'''simple docstring''' from ..utils import DummyObject, requires_backends class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : int = ["""flax""", """transformers"""] def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class _a ( metaclass=__a ): __a : Any = ["""flax""", """transformers"""] def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ): '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ): '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] )
34
0
"""simple docstring""" def A_ ( _lowercase = 100 ): '''simple docstring''' snake_case_ :Optional[Any] = set() snake_case_ :List[Any] = 0 snake_case_ :Tuple = n + 1 # maximum limit for a in range(2, _a ): for b in range(2, _a ): snake_case_ :Tuple = a**b # calculates the current power collect_powers.add(_a ) # adds the result to the set return len(_a ) if __name__ == "__main__": print("Number of terms ", solution(int(str(input()).strip())))
66
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A =logging.get_logger(__name__) A ={ 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } A ={ 'b0': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 2_24, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 12_80, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 2_40, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 14_08, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 2_60, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 15_36, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 3_00, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 17_92, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 3_80, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 20_48, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 4_56, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 23_04, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 5_28, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 25_60, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 6_00, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def snake_case_ (_a : List[str] ): UpperCAmelCase = EfficientNetConfig() UpperCAmelCase = CONFIG_MAP[model_name]['''hidden_dim'''] UpperCAmelCase = CONFIG_MAP[model_name]['''width_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''depth_coef'''] UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dropout_rate'''] UpperCAmelCase = CONFIG_MAP[model_name]['''dw_padding'''] UpperCAmelCase = '''huggingface/label-files''' UpperCAmelCase = '''imagenet-1k-id2label.json''' UpperCAmelCase = 1_0_0_0 UpperCAmelCase = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) ) UpperCAmelCase = {int(_a ): v for k, v in idalabel.items()} UpperCAmelCase = idalabel UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def snake_case_ (): UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCAmelCase = Image.open(requests.get(_a , stream=_a ).raw ) return im def snake_case_ (_a : str ): UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=_a , ) return preprocessor def snake_case_ (_a : Optional[Any] ): UpperCAmelCase = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] UpperCAmelCase = sorted(set(_a ) ) UpperCAmelCase = len(_a ) UpperCAmelCase = {b: str(_a ) for b, i in zip(_a , range(_a ) )} UpperCAmelCase = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: UpperCAmelCase = block_name_mapping[b] rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") ) rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") ) rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") ) rename_keys.append( (F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") ) rename_keys.append( (F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") ) rename_keys.append( (F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") ) rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") ) rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") ) rename_keys.append( (F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") ) rename_keys.append( (F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") ) rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") ) rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") ) rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") ) rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") ) rename_keys.append( (F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") ) rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") ) rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") ) rename_keys.append( (F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") ) rename_keys.append( (F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) UpperCAmelCase = {} for item in rename_keys: if item[0] in original_param_names: UpperCAmelCase = '''efficientnet.''' + item[1] UpperCAmelCase = '''classifier.weight''' UpperCAmelCase = '''classifier.bias''' return key_mapping def snake_case_ (_a : Dict , _a : List[str] , _a : Dict ): for key, value in tf_params.items(): if "normalization" in key: continue UpperCAmelCase = key_mapping[key] if "_conv" in key and "kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: UpperCAmelCase = torch.from_numpy(_a ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: UpperCAmelCase = torch.from_numpy(np.transpose(_a ) ) else: UpperCAmelCase = torch.from_numpy(_a ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_a ) @torch.no_grad() def snake_case_ (_a : Optional[Any] , _a : List[str] , _a : Optional[int] , _a : Dict ): UpperCAmelCase = model_classes[model_name]( include_top=_a , weights='''imagenet''' , input_tensor=_a , input_shape=_a , pooling=_a , classes=1_0_0_0 , classifier_activation='''softmax''' , ) UpperCAmelCase = original_model.trainable_variables UpperCAmelCase = original_model.non_trainable_variables UpperCAmelCase = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: UpperCAmelCase = param.numpy() UpperCAmelCase = list(tf_params.keys() ) # Load HuggingFace model UpperCAmelCase = get_efficientnet_config(_a ) UpperCAmelCase = EfficientNetForImageClassification(_a ).eval() UpperCAmelCase = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) UpperCAmelCase = rename_keys(_a ) replace_params(_a , _a , _a ) # Initialize preprocessor and preprocess input image UpperCAmelCase = convert_image_processor(_a ) UpperCAmelCase = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): UpperCAmelCase = hf_model(**_a ) UpperCAmelCase = outputs.logits.detach().numpy() # Original model inference UpperCAmelCase = False UpperCAmelCase = CONFIG_MAP[model_name]['''image_size'''] UpperCAmelCase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) UpperCAmelCase = image.img_to_array(_a ) UpperCAmelCase = np.expand_dims(_a , axis=0 ) UpperCAmelCase = original_model.predict(_a ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_a , _a , atol=1E-3 ), "The predicted logits are not the same." print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(_a ): os.mkdir(_a ) # Save converted model and image processor hf_model.save_pretrained(_a ) preprocessor.save_pretrained(_a ) if push_to_hub: # Push model and image processor to hub print(F"Pushing converted {model_name} to the hub..." ) UpperCAmelCase = F"efficientnet-{model_name}" preprocessor.push_to_hub(_a ) hf_model.push_to_hub(_a ) if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') A =parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
34
0
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor __lowerCamelCase : int = logging.get_logger(__name__) class A__ ( __a ): def __init__( self , *A_ , **A_ ): '''simple docstring''' warnings.warn( "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use BeitImageProcessor instead." , A_ , ) super().__init__(*A_ , **A_ )
52
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": A =input('Enter image url: ').strip() print(f"""Downloading image from {url} ...""") A =BeautifulSoup(requests.get(url).content, 'html.parser') # The image URL is in the content field of the first meta tag with property og:image A =soup.find('meta', {'property': 'og:image'})['content'] A =requests.get(image_url).content A =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, 'wb') as fp: fp.write(image_data) print(f"""Done. Image saved to disk as {file_name}.""")
34
0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: int =os.path.dirname(os.path.realpath(_a ) ) SCREAMING_SNAKE_CASE_: int =os.path.join(_a , """words.txt""" ) SCREAMING_SNAKE_CASE_: List[Any] ="""""" with open(_a ) as f: SCREAMING_SNAKE_CASE_: Optional[Any] =f.readline() SCREAMING_SNAKE_CASE_: str =[word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] SCREAMING_SNAKE_CASE_: List[Any] =[ word for word in [sum(ord(_a ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(_a ) if __name__ == "__main__": print(solution())
173
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class _a ( __a ): __a : str = ["""vqvae"""] def __init__( self : str , lowercase : AutoencoderKL , lowercase : UNetaDConditionModel , lowercase : Mel , lowercase : Union[DDIMScheduler, DDPMScheduler] , ): '''simple docstring''' super().__init__() self.register_modules(unet=lowercase , scheduler=lowercase , mel=lowercase , vqvae=lowercase ) def A ( self : Optional[Any] ): '''simple docstring''' return 50 if isinstance(self.scheduler , lowercase ) else 1_000 @torch.no_grad() def __call__( self : Optional[Any] , lowercase : int = 1 , lowercase : str = None , lowercase : np.ndarray = None , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = None , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : float = 0 , lowercase : torch.Generator = None , lowercase : float = 0 , lowercase : torch.Tensor = None , lowercase : torch.Tensor = None , lowercase : Tuple=True , ): '''simple docstring''' UpperCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase , device=self.device , ) UpperCAmelCase = noise UpperCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase , lowercase ) UpperCAmelCase = self.mel.audio_slice_to_image(lowercase ) UpperCAmelCase = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase = (input_image / 255) * 2 - 1 UpperCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase = self.vqvae.encode(torch.unsqueeze(lowercase , 0 ) ).latent_dist.sample( generator=lowercase )[0] UpperCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase = int(mask_start_secs * pixels_per_second ) UpperCAmelCase = int(mask_end_secs * pixels_per_second ) UpperCAmelCase = self.scheduler.add_noise(lowercase , lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase ): UpperCAmelCase = self.unet(lowercase , lowercase , lowercase )['''sample'''] else: UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] if isinstance(self.scheduler , lowercase ): UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , eta=lowercase , generator=lowercase , )['''prev_sample'''] else: UpperCAmelCase = self.scheduler.step( model_output=lowercase , timestep=lowercase , sample=lowercase , generator=lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: UpperCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase = 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase = self.vqvae.decode(lowercase )['''sample'''] UpperCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase = (images * 255).round().astype('''uint8''' ) UpperCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) UpperCAmelCase = [self.mel.image_to_audio(lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase ) ) @torch.no_grad() def A ( self : Dict , lowercase : List[Image.Image] , lowercase : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler , lowercase ) self.scheduler.set_timesteps(lowercase ) UpperCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase = (sample / 255) * 2 - 1 UpperCAmelCase = torch.Tensor(lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase = self.scheduler.alphas_cumprod[t] UpperCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase = 1 - alpha_prod_t UpperCAmelCase = self.unet(lowercase , lowercase )['''sample'''] UpperCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def A ( lowercase : torch.Tensor , lowercase : torch.Tensor , lowercase : float ): '''simple docstring''' UpperCAmelCase = acos(torch.dot(torch.flatten(lowercase ) , torch.flatten(lowercase ) ) / torch.norm(lowercase ) / torch.norm(lowercase ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase ) + sin(alpha * theta ) * xa / sin(lowercase )
34
0
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase ( __a ): SCREAMING_SNAKE_CASE_ = ["""image_processor""", """tokenizer"""] SCREAMING_SNAKE_CASE_ = """CLIPImageProcessor""" SCREAMING_SNAKE_CASE_ = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]: snake_case_ = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowerCAmelCase__, ) snake_case_ = kwargs.pop('feature_extractor') snake_case_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(lowerCAmelCase__, lowerCAmelCase__) def __call__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> str: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: snake_case_ = self.tokenizer(lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) if images is not None: snake_case_ = self.image_processor(lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) if text is not None and images is not None: snake_case_ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase__), tensor_type=lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Any: return self.tokenizer.batch_decode(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> str: return self.tokenizer.decode(*lowerCAmelCase__, **lowerCAmelCase__) @property def a_ ( self) -> Dict: snake_case_ = self.tokenizer.model_input_names snake_case_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def a_ ( self) -> List[Any]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCAmelCase__, ) return self.image_processor_class @property def a_ ( self) -> str: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCAmelCase__, ) return self.image_processor
69
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal A =logging.get_logger(__name__) A =TypeVar('DatasetType', Dataset, IterableDataset) def snake_case_ (_a : List[DatasetType] , _a : Optional[List[float]] = None , _a : Optional[int] = None , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('''Unable to interleave an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." ) if dataset_type is Dataset: return _interleave_map_style_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) else: return _interleave_iterable_datasets( _a , _a , _a , info=_a , split=_a , stopping_strategy=_a ) def snake_case_ (_a : List[DatasetType] , _a : Optional[DatasetInfo] = None , _a : Optional[NamedSplit] = None , _a : int = 0 , ): if not dsets: raise ValueError('''Unable to concatenate an empty list of datasets.''' ) for i, dataset in enumerate(_a ): if not isinstance(_a , (Dataset, IterableDataset) ): if isinstance(_a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} " '''is an empty dataset dictionary.''' ) raise ValueError( F"Dataset at position {i} has at least one split: {list(_a )}\n" F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_a ) )}']" ) raise ValueError( F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_a ).__name__}." ) if i == 0: UpperCAmelCase , UpperCAmelCase = ( (Dataset, IterableDataset) if isinstance(_a , _a ) else (IterableDataset, Dataset) ) elif not isinstance(_a , _a ): raise ValueError( F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." ) if dataset_type is Dataset: return _concatenate_map_style_datasets(_a , info=_a , split=_a , axis=_a ) else: return _concatenate_iterable_datasets(_a , info=_a , split=_a , axis=_a )
34
0
from collections import defaultdict from math import ceil, sqrt def _UpperCAmelCase ( snake_case = 1_00_00_00 , snake_case = 10 ): """simple docstring""" _lowerCAmelCase = defaultdict(_a ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: _lowerCAmelCase = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: _lowerCAmelCase = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(_a , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f"{solution() = }")
82
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def snake_case_ (_a : Tuple ): return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def snake_case_ (): UpperCAmelCase = ArgumentParser( '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_a ) UpperCAmelCase = parser.add_subparsers(help='''datasets-cli command helpers''' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(_a ) EnvironmentCommand.register_subcommand(_a ) TestCommand.register_subcommand(_a ) RunBeamCommand.register_subcommand(_a ) DummyDataCommand.register_subcommand(_a ) # Parse args UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if not hasattr(_a , '''func''' ): parser.print_help() exit(1 ) UpperCAmelCase = parse_unknown_args(_a ) # Run UpperCAmelCase = args.func(_a , **_a ) service.run() if __name__ == "__main__": main()
34
0
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np lowerCamelCase__ = re.compile(R"""\b(a|an|the)\b""", re.UNICODE) lowerCamelCase__ = None def __lowerCAmelCase (): __lowerCAmelCase : Dict = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' ) parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' ) parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' ) parser.add_argument( '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' ) parser.add_argument( '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' ) parser.add_argument( '--na-prob-thresh' , '-t' , type=_a , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , ) parser.add_argument( '--out-image-dir' , '-p' , metavar='out_images' , default=_a , help='Save precision-recall curves to directory.' ) parser.add_argument('--verbose' , '-v' , action='store_true' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Union[str, Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: __lowerCAmelCase : Any = bool(qa['answers']['text'] ) return qid_to_has_ans def __lowerCAmelCase (_UpperCamelCase ): def remove_articles(_UpperCamelCase ): return ARTICLES_REGEX.sub(' ' , _a ) def white_space_fix(_UpperCamelCase ): return " ".join(text.split() ) def remove_punc(_UpperCamelCase ): __lowerCAmelCase : List[Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_UpperCamelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_a ) ) ) ) def __lowerCAmelCase (_UpperCamelCase ): if not s: return [] return normalize_answer(_a ).split() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): return int(normalize_answer(_a ) == normalize_answer(_a ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = get_tokens(_a ) __lowerCAmelCase : Any = get_tokens(_a ) __lowerCAmelCase : Union[str, Any] = collections.Counter(_a ) & collections.Counter(_a ) __lowerCAmelCase : Union[str, Any] = sum(common.values() ) if len(_a ) == 0 or len(_a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 __lowerCAmelCase : Optional[int] = 1.0 * num_same / len(_a ) __lowerCAmelCase : str = 1.0 * num_same / len(_a ) __lowerCAmelCase : str = (2 * precision * recall) / (precision + recall) return fa def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : int = {} __lowerCAmelCase : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: __lowerCAmelCase : str = qa['id'] __lowerCAmelCase : str = [t for t in qa['answers']['text'] if normalize_answer(_a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string __lowerCAmelCase : Optional[int] = [''] if qid not in preds: print(F"Missing prediction for {qid}" ) continue __lowerCAmelCase : int = preds[qid] # Take max over all gold answers __lowerCAmelCase : Union[str, Any] = max(compute_exact(_a , _a ) for a in gold_answers ) __lowerCAmelCase : Optional[Any] = max(compute_fa(_a , _a ) for a in gold_answers ) return exact_scores, fa_scores def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Dict = {} for qid, s in scores.items(): __lowerCAmelCase : Dict = na_probs[qid] > na_prob_thresh if pred_na: __lowerCAmelCase : List[str] = float(not qid_to_has_ans[qid] ) else: __lowerCAmelCase : int = s return new_scores def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): if not qid_list: __lowerCAmelCase : Union[str, Any] = len(_a ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores.values() ) / total), ('f1', 100.0 * sum(fa_scores.values() ) / total), ('total', total), ] ) else: __lowerCAmelCase : str = len(_a ) return collections.OrderedDict( [ ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ('total', total), ] ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): for k in new_eval: __lowerCAmelCase : str = new_eval[k] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): plt.step(_a , _a , color='b' , alpha=0.2 , where='post' ) plt.fill_between(_a , _a , step='post' , alpha=0.2 , color='b' ) plt.xlabel('Recall' ) plt.ylabel('Precision' ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_a ) plt.savefig(_a ) plt.clf() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ): __lowerCAmelCase : str = sorted(_a , key=lambda _UpperCamelCase : na_probs[k] ) __lowerCAmelCase : Optional[Any] = 0.0 __lowerCAmelCase : Tuple = 1.0 __lowerCAmelCase : Any = 0.0 __lowerCAmelCase : Union[str, Any] = [1.0] __lowerCAmelCase : Any = [0.0] __lowerCAmelCase : Tuple = 0.0 for i, qid in enumerate(_a ): if qid_to_has_ans[qid]: true_pos += scores[qid] __lowerCAmelCase : Optional[Any] = true_pos / float(i + 1 ) __lowerCAmelCase : Optional[int] = true_pos / float(_a ) if i == len(_a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_a ) recalls.append(_a ) if out_image: plot_pr_curve(_a , _a , _a , _a ) return {"ap": 100.0 * avg_prec} def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if out_image_dir and not os.path.exists(_a ): os.makedirs(_a ) __lowerCAmelCase : Optional[Any] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return __lowerCAmelCase : int = make_precision_recall_eval( _a , _a , _a , _a , out_image=os.path.join(_a , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , ) __lowerCAmelCase : List[Any] = make_precision_recall_eval( _a , _a , _a , _a , out_image=os.path.join(_a , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , ) __lowerCAmelCase : Union[str, Any] = {k: float(_a ) for k, v in qid_to_has_ans.items()} __lowerCAmelCase : Optional[int] = make_precision_recall_eval( _a , _a , _a , _a , out_image=os.path.join(_a , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , ) merge_eval(_a , _a , 'pr_exact' ) merge_eval(_a , _a , 'pr_f1' ) merge_eval(_a , _a , 'pr_oracle' ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if not qid_list: return __lowerCAmelCase : Tuple = [na_probs[k] for k in qid_list] __lowerCAmelCase : Optional[int] = np.ones_like(_a ) / float(len(_a ) ) plt.hist(_a , weights=_a , bins=20 , range=(0.0, 1.0) ) plt.xlabel('Model probability of no-answer' ) plt.ylabel('Proportion of dataset' ) plt.title(F"Histogram of no-answer probability: {name}" ) plt.savefig(os.path.join(_a , F"na_prob_hist_{name}.png" ) ) plt.clf() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) __lowerCAmelCase : List[Any] = num_no_ans __lowerCAmelCase : Any = cur_score __lowerCAmelCase : Any = 0.0 __lowerCAmelCase : Dict = sorted(_a , key=lambda _UpperCamelCase : na_probs[k] ) for i, qid in enumerate(_a ): if qid not in scores: continue if qid_to_has_ans[qid]: __lowerCAmelCase : Union[str, Any] = scores[qid] else: if preds[qid]: __lowerCAmelCase : int = -1 else: __lowerCAmelCase : Optional[int] = 0 cur_score += diff if cur_score > best_score: __lowerCAmelCase : List[Any] = cur_score __lowerCAmelCase : Any = na_probs[qid] return 100.0 * best_score / len(_a ), best_thresh def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase , __lowerCAmelCase : List[Any] = find_best_thresh(_a , _a , _a , _a ) __lowerCAmelCase , __lowerCAmelCase : Optional[int] = find_best_thresh(_a , _a , _a , _a ) __lowerCAmelCase : List[Any] = best_exact __lowerCAmelCase : str = exact_thresh __lowerCAmelCase : Any = best_fa __lowerCAmelCase : Union[str, Any] = fa_thresh def __lowerCAmelCase (): with open(OPTS.data_file ) as f: __lowerCAmelCase : Any = json.load(_a ) __lowerCAmelCase : str = dataset_json['data'] with open(OPTS.pred_file ) as f: __lowerCAmelCase : Union[str, Any] = json.load(_a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: __lowerCAmelCase : Union[str, Any] = json.load(_a ) else: __lowerCAmelCase : Tuple = {k: 0.0 for k in preds} __lowerCAmelCase : Optional[Any] = make_qid_to_has_ans(_a ) # maps qid to True/False __lowerCAmelCase : Any = [k for k, v in qid_to_has_ans.items() if v] __lowerCAmelCase : int = [k for k, v in qid_to_has_ans.items() if not v] __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = get_raw_scores(_a , _a ) __lowerCAmelCase : Union[str, Any] = apply_no_ans_threshold(_a , _a , _a , OPTS.na_prob_thresh ) __lowerCAmelCase : str = apply_no_ans_threshold(_a , _a , _a , OPTS.na_prob_thresh ) __lowerCAmelCase : Tuple = make_eval_dict(_a , _a ) if has_ans_qids: __lowerCAmelCase : Dict = make_eval_dict(_a , _a , qid_list=_a ) merge_eval(_a , _a , 'HasAns' ) if no_ans_qids: __lowerCAmelCase : List[str] = make_eval_dict(_a , _a , qid_list=_a ) merge_eval(_a , _a , 'NoAns' ) if OPTS.na_prob_file: find_all_best_thresh(_a , _a , _a , _a , _a , _a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_a , _a , _a , _a , _a , OPTS.out_image_dir ) histogram_na_prob(_a , _a , OPTS.out_image_dir , 'hasAns' ) histogram_na_prob(_a , _a , OPTS.out_image_dir , 'noAns' ) if OPTS.out_file: with open(OPTS.out_file , 'w' ) as f: json.dump(_a , _a ) else: print(json.dumps(_a , indent=2 ) ) if __name__ == "__main__": lowerCamelCase__ = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
86
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow A =[ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) A =logging.getLogger() def snake_case_ (): UpperCAmelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase = parser.parse_args() return args.f def snake_case_ (_a : List[str] , _a : Union[str, Any]="eval" ): UpperCAmelCase = os.path.join(_a , F"{split}_results.json" ) if os.path.exists(_a ): with open(_a , '''r''' ) as f: return json.load(_a ) raise ValueError(F"can't find {path}" ) A =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( __a ): def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_glue.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def A ( self : Any ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_clm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_summarization_flax.main() UpperCAmelCase = get_results(lowercase , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_ta_mlm_flax.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_flax_ner.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.get_auto_remove_tmp_dir() UpperCAmelCase = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(lowercase , '''argv''' , lowercase ): run_qa.main() UpperCAmelCase = get_results(lowercase ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
34
0