code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ) -> Union[str, Any]: lowerCAmelCase__ : Union[str, Any] = [] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: self.events.append("""on_init_end""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> int: self.events.append("""on_train_begin""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]: self.events.append("""on_train_end""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: self.events.append("""on_epoch_begin""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict: self.events.append("""on_epoch_end""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: self.events.append("""on_step_begin""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]: self.events.append("""on_step_end""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: self.events.append("""on_evaluate""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase 
,**__UpperCAmelCase ) -> int: self.events.append("""on_predict""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: self.events.append("""on_save""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Any: self.events.append("""on_log""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]: self.events.append("""on_prediction_step""" ) @require_torch class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : int = tempfile.mkdtemp() def UpperCAmelCase_ ( self ) -> Tuple: shutil.rmtree(self.output_dir ) def UpperCAmelCase_ ( self ,__UpperCAmelCase=0 ,__UpperCAmelCase=0 ,__UpperCAmelCase=64 ,__UpperCAmelCase=64 ,__UpperCAmelCase=None ,__UpperCAmelCase=False ,**__UpperCAmelCase ) -> Optional[int]: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowerCAmelCase__ : Tuple = RegressionDataset(length=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = RegressionDataset(length=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = RegressionModelConfig(a=__UpperCAmelCase ,b=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = RegressionPreTrainedModel(__UpperCAmelCase ) lowerCAmelCase__ : int = TrainingArguments(self.output_dir ,disable_tqdm=__UpperCAmelCase ,report_to=[] ,**__UpperCAmelCase ) return Trainer( __UpperCAmelCase ,__UpperCAmelCase ,train_dataset=__UpperCAmelCase ,eval_dataset=__UpperCAmelCase ,callbacks=__UpperCAmelCase ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) ) # Order doesn't matter lowerCAmelCase__ : Optional[int] = sorted(__UpperCAmelCase ,key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else cb.__class__.__name__ ) lowerCAmelCase__ : Optional[int] = sorted(__UpperCAmelCase ,key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else cb.__class__.__name__ ) for cba, cba in zip(__UpperCAmelCase ,__UpperCAmelCase ): if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and isinstance(__UpperCAmelCase ,__UpperCAmelCase ): self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase ) elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): self.assertEqual(__UpperCAmelCase ,cba.__class__ ) elif not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and isinstance(__UpperCAmelCase ,__UpperCAmelCase ): self.assertEqual(cba.__class__ ,__UpperCAmelCase ) else: self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ : Any = ["""on_init_end""", """on_train_begin"""] lowerCAmelCase__ : Union[str, Any] = 0 lowerCAmelCase__ : Tuple = len(trainer.get_eval_dataloader() ) lowerCAmelCase__ : Optional[int] = 
["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(__UpperCAmelCase ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : Any = self.get_trainer() lowerCAmelCase__ : Tuple = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) # Callbacks passed at init are added to the default callbacks lowerCAmelCase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowerCAmelCase__ : Tuple = self.get_trainer(disable_tqdm=__UpperCAmelCase ) lowerCAmelCase__ : int = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowerCAmelCase__ : List[Any] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(__UpperCAmelCase ) expected_callbacks.remove(__UpperCAmelCase ) 
self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = self.get_trainer() lowerCAmelCase__ : Optional[int] = trainer.pop_callback(__UpperCAmelCase ) self.assertEqual(cb.__class__ ,__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) trainer.add_callback(__UpperCAmelCase ) expected_callbacks.insert(0 ,__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) # We can also add, pop, or remove by instance lowerCAmelCase__ : Optional[int] = self.get_trainer() lowerCAmelCase__ : Any = trainer.callback_handler.callbacks[0] trainer.remove_callback(__UpperCAmelCase ) expected_callbacks.remove(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.get_trainer() lowerCAmelCase__ : Dict = trainer.callback_handler.callbacks[0] lowerCAmelCase__ : List[Any] = trainer.pop_callback(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) trainer.add_callback(__UpperCAmelCase ) expected_callbacks.insert(0 ,__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" ,category=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowerCAmelCase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase ,self.get_expected_events(__UpperCAmelCase ) ) # Independent log/save/eval lowerCAmelCase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 ) trainer.train() 
lowerCAmelCase__ : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase ,self.get_expected_events(__UpperCAmelCase ) ) lowerCAmelCase__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 ) trainer.train() lowerCAmelCase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase ,self.get_expected_events(__UpperCAmelCase ) ) lowerCAmelCase__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy="""steps""" ) trainer.train() lowerCAmelCase__ : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase ,self.get_expected_events(__UpperCAmelCase ) ) lowerCAmelCase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy="""epoch""" ) trainer.train() lowerCAmelCase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase ,self.get_expected_events(__UpperCAmelCase ) ) # A bit of everything lowerCAmelCase__ : List[str] = self.get_trainer( callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=10 ,eval_steps=5 ,evaluation_strategy="""steps""" ,) trainer.train() lowerCAmelCase__ : Optional[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase ,self.get_expected_events(__UpperCAmelCase ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: lowerCAmelCase__ : Union[str, Any] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,) assert str(__UpperCAmelCase ) in warn_mock.call_args[0][0]
37
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: return None class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: return None class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' __lowercase : Dict = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def UpperCAmelCase_ ( self ) -> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Any: from transformers import BertModel lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__UpperCAmelCase ) ) vocab_file.flush() lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) ) model.save_pretrained(__UpperCAmelCase ) 
self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase ) @require_tf @slow def UpperCAmelCase_ ( self ) -> List[str]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def UpperCAmelCase_ ( self ) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]: try: # Compute path with TemporaryDirectory() as tempdir: lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) return path except Exception as e: self.fail(__UpperCAmelCase ) @require_torch @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: from transformers import BertModel lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase 
,__UpperCAmelCase ,"""pt""" ) @require_tf @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Optional[int]: from transformers import TFBertModel lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase ) # Assert all variables are present self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should 
have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCAmelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCAmelCase ) ,1 ) self.assertEqual(len(__UpperCAmelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] ,"""input_ids""" ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Dict = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
37
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = abs(UpperCamelCase ) lowerCAmelCase__ : List[Any] = 0 while n > 0: res += n % 10 n //= 10 return res def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = abs(UpperCamelCase ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return sum(int(UpperCamelCase ) for c in str(abs(UpperCamelCase ) ) ) def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None: lowerCAmelCase__ : str = f"""{func.__name__}({value})""" lowerCAmelCase__ : str = timeit(f"""__main__.{call}""" , setup="""import __main__""" ) print(f"""{call:56} = {func(UpperCamelCase )} -- {timing:.4f} seconds""" ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(UpperCamelCase , UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
37
'''simple docstring''' from maths.prime_factors import prime_factors def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
37
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" if (voltage, current, resistance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance < 0: raise ValueError("""Resistance cannot be negative""" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
37
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''AI-Sweden/gpt-sw3-126m''': 2048, '''AI-Sweden/gpt-sw3-350m''': 2048, '''AI-Sweden/gpt-sw3-1.6b''': 2048, '''AI-Sweden/gpt-sw3-6.7b''': 2048, '''AI-Sweden/gpt-sw3-20b''': 2048, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None: lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" ) if 
name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase__ : Tuple = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token else: lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = do_lower_case lowerCAmelCase__ : Dict = remove_space lowerCAmelCase__ : Optional[Any] = keep_accents lowerCAmelCase__ : int = vocab_file lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase__ : List[str] = re.compile( F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) -> Any: lowerCAmelCase__ : int = self.__dict__.copy() lowerCAmelCase__ : Optional[int] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCAmelCase_ ( self ) -> int: return len(self.sp_model ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase ) # Normalize whitespaces lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase ) return text def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase ) return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.IdToPiece(__UpperCAmelCase ) @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> str: return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : int = [] lowerCAmelCase__ : Optional[int] = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using 
sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Optional[Any] = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ : Any = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string def UpperCAmelCase_ ( self ) -> Dict[str, int]: lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : str = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase ) else: lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text] lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase ) if return_tensors is True or return_tensors == 
"pt": lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase ) return token_ids def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.decode(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]: lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] lowerCAmelCase__ : Any = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=__UpperCAmelCase )
37
1
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all BART models at https://huggingface.co/models?filter=bart _lowerCAmelCase = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, } _lowerCAmelCase = { '''facebook/bart-base''': 1024, '''facebook/bart-large''': 1024, '''facebook/bart-large-mnli''': 1024, '''facebook/bart-large-cnn''': 1024, '''facebook/bart-large-xsum''': 1024, '''yjernite/bart_eli5''': 1024, } 
@lru_cache()
def bytes_to_unicode():
    """
    Return the reversible byte -> unicode-string mapping used by byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are shifted to
    unused code points above 2**8 so the vocabulary never contains whitespace
    or control characters (which would break the merges file).

    NOTE(review): the obfuscated original assigned to ``lowerCAmelCase__`` but
    read ``bs``/``cs``/``n`` — restored here so the function actually runs.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """
    BART tokenizer using byte-level Byte-Pair-Encoding (same scheme as GPT-2).

    Requires a ``vocab.json`` (token -> id) and a ``merges.txt`` (BPE merge
    ranks).  ``add_prefix_space=True`` makes a leading word behave like a
    mid-sentence word (byte-level BPE distinguishes ``"hello"`` from
    ``" hello"``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        # Wrap plain strings as AddedToken so stripping behaviour is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # first line is the "#version" header, last element is the trailing ""
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return token -> id mapping including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply (and cache) the lowest-rank BPE merges to ``token``."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the pair with the smallest rank; unseen pairs rank +inf
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to its token (str)."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # bug fix: the obfuscated lambda took ``__UpperCAmelCase`` but read ``kv``
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BART special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return 1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so a leading word tokenizes like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
37
"""Slow integration tests for XLM-RoBERTa (base and large checkpoints)."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    # NOTE(review): the obfuscated original named BOTH test methods
    # ``UpperCAmelCase_`` so the second definition shadowed the first and only
    # one test was ever collected; distinct names are restored here.

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
37
1
"""Kruskal's minimum-spanning-tree algorithm on a disjoint-set forest.

NOTE(review): the obfuscated original assigned ``TypeVar("T")`` and all class
definitions to mangled names while the code *read* ``T``, ``DisjointSetTreeNode``,
``DisjointSetTree`` and ``GraphUndirectedWeighted`` — restored here.
"""
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """A node of the disjoint-set forest: payload, parent pointer and rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # every new node starts as its own set representative
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) with union-by-rank and path compression."""

    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the representative of the set ``data`` belongs to (with path compression)."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        """Attach the lower-rank root under the higher-rank root (union by rank)."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the two sets containing ``data1`` and ``data2``."""
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a nested adjacency dict."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Add ``node`` only if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge with the given weight (both directions)."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        """Return the minimum spanning tree as a new graph (Kruskal's algorithm)."""
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda edge: edge[2])  # cheapest edges first

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: greedily take edges that join two different components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
37
"""Utilities to load Flax checkpoints into PyTorch models.

NOTE(review): restored from the obfuscated original, whose assignment targets
were all mangled to ``lowerCAmelCase__`` while the reads kept canonical names
(``load_flax_weights_in_pytorch_model``, ``model_file``, ``missing_keys``, ...).
"""
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Deserialize the msgpack checkpoint at ``flax_checkpoint_path`` and load it into ``model``."""
    try:
        with open(flax_checkpoint_path, "rb") as flax_state_f:
            flax_state = from_bytes(model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            # A text file starting with "version" is a git-lfs pointer, not weights.
            with open(flax_checkpoint_path) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            # bug fix: the original f-string read the undefined name ``model_file``
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy the weights of a (nested-dict) Flax state into ``pt_model`` in place and return it."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        # Rename Flax parameter leaves to their PyTorch equivalents and fix layouts:
        # 4-d conv kernels are transposed HWIO -> OIHW; dense kernels are transposed.
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            # Flax numbers submodules ``block_0`` where PyTorch uses ``block.0``.
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
37
1
"""Tests for the ProphetNet tokenizer (WordPiece-based).

NOTE(review): restored from the obfuscated original, in which every test
method was named ``UpperCAmelCase_`` (so only the last survived) and the
boolean kwargs of ``BasicTokenizer`` were replaced by an undefined name.
The restored booleans are derived from the expected outputs asserted below.
"""
import os
import unittest

from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


class ProphetNetTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # minimal vocabulary used by the fast local tests
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prophetnet_integration(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
37
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str: lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Dict = use_auxiliary_loss lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : List[str] = min_size lowerCAmelCase__ : int = max_size lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = mask_feature_size def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase ) lowerCAmelCase__ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5 ).float() lowerCAmelCase__ : Optional[int] = 
(torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long() lowerCAmelCase__ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]: with torch.no_grad(): lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of 
the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : Dict = model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model( pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class 
lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Dict = False __lowercase : Tuple = False __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : str = MaskFormerModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase_ ( self ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass 
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2 lowerCAmelCase__ : Any = { """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(), } lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = 
model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase_ ( self ) -> int: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Dict = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Tuple = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase__ : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase = 1e-4 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = torch.tensor( 
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : List[str] = prepare_img() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : Optional[int] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Tuple = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Union[str, Any] = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, 
-7.76_30E00, -5.12_63E00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Tuple = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) 
-> Optional[Any]: lowerCAmelCase__ : str = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase__ : Any = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
37
1
"""Multilingual CLIP text encoder: an XLM-RoBERTa tower plus a linear projection
into the CLIP image-embedding space (M-CLIP)."""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the M-CLIP text encoder.

    Extends ``XLMRobertaConfig`` with the two dimensions needed by the
    projection head.
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # Hidden width of the text transformer (input of the projection head).
        self.transformerDimensions = transformerDimSize
        # Dimensionality of the target CLIP image-embedding space (output of the head).
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-R text model whose mean-pooled output is projected into CLIP space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        # Projects pooled text features (transformerDimensions) into the
        # image-embedding space (numDims).
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected pooled embedding, per-token hidden states).

        The pooled embedding is a mask-weighted mean over non-padding tokens.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Zero out padding positions, then divide by the number of real tokens.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
37
"""FocalNet model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model.

    Instantiating a configuration with the defaults yields a configuration in
    the style of the microsoft/focalnet-tiny architecture.

    Args:
        image_size (int): Input image resolution.
        patch_size (int): Patch-embedding patch size.
        num_channels (int): Number of input image channels.
        embed_dim (int): Dimensionality of the patch embedding.
        use_conv_embed (bool): Whether to use a convolutional patch embedding.
        hidden_sizes (list[int]): Hidden size of each stage.
        depths (list[int]): Number of layers in each stage.
        focal_levels (list[int]): Number of focal levels per stage.
        focal_windows (list[int]): Focal window size per level in each stage.
        hidden_act (str): Activation function name.
        mlp_ratio (float): MLP hidden dim ratio.
        hidden_dropout_prob (float): Dropout probability.
        drop_path_rate (float): Stochastic-depth rate.
        use_layerscale (bool): Whether to use layer scale.
        layerscale_value (float): Initial layer-scale value.
        use_post_layernorm (bool): Post-layernorm in blocks.
        use_post_layernorm_in_modulation (bool): Post-layernorm in modulation.
        normalize_modulator (bool): Whether to normalize the modulator.
        initializer_range (float): Std of the weight initializer.
        layer_norm_eps (float): LayerNorm epsilon.
        encoder_stride (int): Stride for masked-image-modeling decoder upsampling.
        out_features / out_indices: Backbone stage selection (see
            ``BackboneConfigMixin``).
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=None,
        depths=None,
        focal_levels=None,
        focal_windows=None,
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        # None sentinels avoid shared mutable defaults; values match the
        # original defaults ([192, 384, 768, 768], [2, 2, 6, 2], ...).
        self.hidden_sizes = hidden_sizes if hidden_sizes is not None else [192, 384, 768, 768]
        self.depths = depths if depths is not None else [2, 2, 6, 2]
        self.focal_levels = focal_levels if focal_levels is not None else [2, 2, 2, 2]
        self.focal_windows = focal_windows if focal_windows is not None else [3, 3, 3, 3]
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One entry per stage plus the stem, used for backbone feature selection.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
37
1
"""Convert a T5X SwitchTransformers checkpoint to a PyTorch model."""
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    """Rename flattened T5X parameter keys in place to the transformers layout.

    Mutates and returns ``s_dict``: renames layer/block paths, applies
    ``MOE_LAYER_NAME_MAPPING``, transposes the relative-attention bias, and
    splits stacked expert weights into one entry per expert.
    """
    # 1. In the HF T5 implementation, the blocks are simply numbered.
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        enc_dec_regex = r"(encoder|decoder)\/"
        if re.match(enc_dec_regex, key):
            groups = re.match(enc_dec_regex, new_key).groups()
            # MLP sub-layer index differs between encoder (1) and decoder (2)
            # because the decoder has an extra cross-attention sub-layer.
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    # The relative-attention bias is stored transposed relative to PyTorch.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor
    # into one parameter per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    """Build a ``SwitchTransformersConfig`` from a T5X gin config file.

    Parses ``param = value`` lines that appear in ``GIN_TO_CONFIG_MAPPING``,
    plus the activation tuple, and injects ``num_experts``.
    """
    # `regex` (third-party) is used deliberately here, shadowing stdlib `re`
    # within this function only.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # Preserve int vs float depending on whether a dot is present.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X checkpoint, rename its parameters, and save a PyTorch model.

    Args:
        flax_checkpoint_path: Path to the T5X checkpoint.
        config_file: Pretrained config name/path (used when no gin file given).
        gin_file: Optional gin config to derive the model config from.
        pytorch_dump_path: Output directory for the converted model.
        num_experts: Number of experts (only used with ``gin_file``).
    """
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
37
"""Convert a saved HF Diffusers pipeline (UNet + VAE + text encoder) into a
single original Stable Diffusion checkpoint (.ckpt or .safetensors)."""
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Map HF Diffusers UNet parameter names to the original SD layout."""
    # Build an hf-name -> sd-name mapping, then apply the prefix tables.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    """Turn an HF linear attention weight into the SD 1x1-conv shape."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Map HF Diffusers VAE parameter names (and shapes) to the SD layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    # The mid-block attention weights are linear in HF but conv in SD.
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert a v2.x (OpenCLIP) HF text encoder state dict to the SD layout.

    Separate q/k/v projections are captured and concatenated into the fused
    ``in_proj_weight``/``in_proj_bias`` tensors that OpenCLIP expects.

    Raises:
        Exception: if any of the q/k/v tensors for a layer is missing.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # Single character 'q' / 'k' / 'v' selecting the slot index.
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """v1.x text encoders need no renaming; return the dict unchanged."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
37
1
"""Elementary number-theory helpers: primality tests, sieve of Eratosthenes,
prime factorization, gcd/lcm, Goldbach pairs, divisors, factorial, Fibonacci.

Restored from an obfuscated version in which every function had been renamed
to the same identifier, so only the last definition survived and all internal
calls (``is_prime``, ``prime_factorization``, ``get_prime_numbers``,
``get_divisors``, ``gcd``, ``is_even``) raised ``NameError``.
"""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (0 and 1 are not prime)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    # Trial division: a composite has a divisor <= sqrt(number).
    for divisor in range(2, int(round(sqrt(number))) + 1):
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to ``n`` inclusive."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    begin_list = list(range(2, n + 1))
    # Cross out (zero) every multiple of each still-uncrossed number.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to ``n`` inclusive, by repeated tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` with multiplicity, ascending."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    factor = 2  # potential prime number factors.
    quotient = number
    if number in (0, 1):
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # exact division here, keeps quotient an int
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = max(prime_factorization(number))
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = min(prime_factorization(number))
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes whose sum is the even ``number`` (> 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    i = 0  # run variable for while-loops.
    loop = True  # exit flag for breaking both loops once a pair is found
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of the two numbers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take it with the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2, picking up factors unique to number2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the ``n``-th prime, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the two given primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of ``n`` including 1 and ``n`` itself."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return the fraction ``numerator/denominator`` reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return ``n!`` (with 0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """Return the ``n``-th Fibonacci number (fib(1) == fib(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
37
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets _lowerCAmelCase = datasets.logging.get_logger(__name__) _lowerCAmelCase = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' _lowerCAmelCase = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' _lowerCAmelCase = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. 
Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' _lowerCAmelCase = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/google-research/bleurt""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ,id="""sequence""" ), """references""": datasets.Value("""string""" ,id="""sequence""" ), } ) ,codebase_urls=["""https://github.com/google-research/bleurt"""] ,reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> 
Tuple: # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. """ """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) lowerCAmelCase__ : str = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: lowerCAmelCase__ : Union[str, Any] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: lowerCAmelCase__ : List[Any] = self.config_name.upper() else: raise KeyError( F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer lowerCAmelCase__ : int = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) lowerCAmelCase__ : Dict = score.BleurtScorer(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ : Union[str, Any] = self.scorer.score(references=__UpperCAmelCase ,candidates=__UpperCAmelCase ) return {"scores": scores}
37
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=False ): """simple docstring""" lowerCAmelCase__ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", 
"""vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase__ : Union[str, Any] = """""" else: lowerCAmelCase__ : Dict = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ : int = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) lowerCAmelCase__ : List[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ : List[Any] = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase__ : Dict = in_proj_bias[: config.hidden_size] lowerCAmelCase__ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ : Any = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ : int = in_proj_bias[-config.hidden_size :] def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = ["""head.weight""", 
"""head.bias"""] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Any = dct.pop(UpperCamelCase ) lowerCAmelCase__ : int = val def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase__ : List[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = ViTConfig() lowerCAmelCase__ : Any = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": lowerCAmelCase__ : int = True lowerCAmelCase__ : Optional[int] = int(vit_name[-12:-10] ) lowerCAmelCase__ : Dict = int(vit_name[-9:-6] ) else: lowerCAmelCase__ : Union[str, Any] = 1000 lowerCAmelCase__ : Any = """huggingface/label-files""" lowerCAmelCase__ : List[Any] = """imagenet-1k-id2label.json""" lowerCAmelCase__ : Any = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) lowerCAmelCase__ : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCAmelCase__ : int = idalabel lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()} lowerCAmelCase__ : Optional[Any] = int(vit_name[-6:-4] ) lowerCAmelCase__ : Union[str, Any] = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): lowerCAmelCase__ : List[Any] = 192 lowerCAmelCase__ : Tuple = 768 lowerCAmelCase__ : List[Any] = 12 lowerCAmelCase__ : str = 3 elif vit_name[9:].startswith("""small""" ): lowerCAmelCase__ : List[str] = 384 lowerCAmelCase__ : List[Any] = 1536 lowerCAmelCase__ : Any = 12 lowerCAmelCase__ : List[str] = 6 else: pass else: if 
vit_name[4:].startswith("""small""" ): lowerCAmelCase__ : Optional[Any] = 768 lowerCAmelCase__ : Any = 2304 lowerCAmelCase__ : Tuple = 8 lowerCAmelCase__ : str = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): lowerCAmelCase__ : Any = 1024 lowerCAmelCase__ : List[str] = 4096 lowerCAmelCase__ : Optional[Any] = 24 lowerCAmelCase__ : Optional[int] = 16 elif vit_name[4:].startswith("""huge""" ): lowerCAmelCase__ : List[str] = 1280 lowerCAmelCase__ : int = 5120 lowerCAmelCase__ : Optional[int] = 32 lowerCAmelCase__ : List[Any] = 16 # load original model from timm lowerCAmelCase__ : Optional[Any] = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCAmelCase__ : Union[str, Any] = timm_model.state_dict() if base_model: remove_classification_head_(UpperCamelCase ) lowerCAmelCase__ : str = create_rename_keys(UpperCamelCase , UpperCamelCase ) for src, dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) read_in_q_k_v(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": lowerCAmelCase__ : Union[str, Any] = ViTModel(UpperCamelCase ).eval() else: lowerCAmelCase__ : Any = ViTForImageClassification(UpperCamelCase ).eval() model.load_state_dict(UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: lowerCAmelCase__ : int = DeiTImageProcessor(size=config.image_size ) else: lowerCAmelCase__ : Union[str, Any] = ViTImageProcessor(size=config.image_size ) lowerCAmelCase__ : Optional[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCAmelCase__ : Tuple = encoding["""pixel_values"""] lowerCAmelCase__ : str = model(UpperCamelCase ) if base_model: lowerCAmelCase__ : int = timm_model.forward_features(UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape 
assert torch.allclose(UpperCamelCase , outputs.pooler_output , atol=1e-3 ) else: lowerCAmelCase__ : Optional[Any] = timm_model(UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(UpperCamelCase , outputs.logits , atol=1e-3 ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(UpperCamelCase ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_patch16_224''', type=str, help='''Name of the ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowerCAmelCase = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
37
'''simple docstring''' import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch _lowerCAmelCase = logging.get_logger(__name__) class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str: if not conversation_id: lowerCAmelCase__ : List[str] = uuid.uuida() if past_user_inputs is None: lowerCAmelCase__ : List[Any] = [] if generated_responses is None: lowerCAmelCase__ : str = [] lowerCAmelCase__ : uuid.UUID = conversation_id lowerCAmelCase__ : List[str] = past_user_inputs lowerCAmelCase__ : List[str] = generated_responses lowerCAmelCase__ : Optional[str] = text def __eq__( self ,__UpperCAmelCase ) -> Dict: if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]: if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) lowerCAmelCase__ : Optional[int] = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". 
Set `overwrite` to True to overwrite unprocessed user input""" ) else: lowerCAmelCase__ : Optional[Any] = text def UpperCAmelCase_ ( self ) -> List[Any]: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCAmelCase__ : Union[str, Any] = None def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: self.generated_responses.append(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ) -> Tuple: lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): lowerCAmelCase__ : Any = """user""" if is_user else """bot""" output += F"""{name} >> {text} \n""" return output @add_end_docstrings( SCREAMING_SNAKE_CASE_ , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase ) if self.tokenizer.pad_token_id is None: lowerCAmelCase__ : Tuple = self.tokenizer.eos_token def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : List[Any] = {} lowerCAmelCase__ : Optional[int] = {} lowerCAmelCase__ : List[str] = {} if min_length_for_response is not None: lowerCAmelCase__ : Any = min_length_for_response if minimum_tokens is not None: lowerCAmelCase__ : Optional[int] = minimum_tokens if "max_length" in generate_kwargs: lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCAmelCase__ : int = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1: return outputs[0] return outputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]: if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
""" """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ): lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase ) if self.framework == "pt": lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCAmelCase__ : Dict = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) lowerCAmelCase__ : str = max_length - minimum_tokens lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:] lowerCAmelCase__ : str = model_inputs.pop("""conversation""" ) lowerCAmelCase__ : Union[str, Any] = max_length lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase ) if self.model.config.is_encoder_decoder: lowerCAmelCase__ : int = 1 else: lowerCAmelCase__ : int = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]: lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""] lowerCAmelCase__ : Tuple = self.tokenizer.decode( output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) lowerCAmelCase__ : 
Union[str, Any] = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(__UpperCAmelCase ) return conversation def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id lowerCAmelCase__ : int = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) ) if len(__UpperCAmelCase ) > self.tokenizer.model_max_length: lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
37
1
"""Convert a hexadecimal string to its binary representation."""

# Explicit export list: the function name is underscore-prefixed, so it would
# otherwise be hidden from ``from module import *``.
__all__ = ["_SCREAMING_SNAKE_CASE"]


def _SCREAMING_SNAKE_CASE ( hex_num ):
    """Convert a hexadecimal string to its binary digits packed into an int.

    e.g. ``"AC" -> 10101100``, ``"-e" -> -1110``, ``"0" -> 0``.
    Leading/trailing whitespace is ignored; a leading ``-`` marks a negative
    value.

    Raises:
        ValueError: if the input is empty/whitespace-only or is not a valid
            hexadecimal number.
    """
    # Fix: the body previously read ``hex_num`` while the parameter was named
    # ``UpperCamelCase``, so every call raised NameError.
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    # Fix: for input "0" the loop below never runs, and int("") raised
    # ValueError; zero is returned directly instead.
    if int_num == 0:
        return 0
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
"""Zero-shot image classification pipeline (CLIP-style dual encoder)."""
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image
    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

_lowerCAmelCase = logging.get_logger(__name__)


# Fix: the decorator argument and the base class were the undefined name
# ``SCREAMING_SNAKE_CASE_``; restored to PIPELINE_INIT_ARGS / Pipeline.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase_(Pipeline):
    """Score an image against free-text candidate labels.

    The pipeline formats each candidate label with a hypothesis template,
    encodes image and texts with a zero-shot image-classification model, and
    returns labels sorted by descending probability.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""")
        # Pick the TF or PT model mapping depending on the loaded framework.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, image, **kwargs):
        """Classify ``image`` (path, URL or PIL image) against ``candidate_labels``."""
        return super().__call__(image, **kwargs)

    # NOTE(review): the four pipeline hooks below were all obfuscated to the
    # single shadowing name ``UpperCAmelCase_``; the names required by the
    # ``Pipeline`` base class are restored so the pipeline actually runs.
    def _sanitize_parameters(self, **kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) params."""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}

    # Fix: the original signature declared three parameters all named
    # ``__UpperCAmelCase`` (a SyntaxError) while the body read the original
    # names; both restored.
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """Encode the image and one templated sentence per candidate label."""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["""text_inputs"""] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        """Run the model; keep the labels alongside the per-image logits."""
        candidate_labels = model_inputs.pop("""candidate_labels""")
        text_inputs = model_inputs.pop("""text_inputs""")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Turn logits into a list of {"score", "label"} dicts, best first."""
        candidate_labels = model_outputs.pop("""candidate_labels""")
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""")
        # Fix: the sort key was ``lambda __UpperCAmelCase : -x[0]`` — the body
        # referenced the undefined name ``x`` and raised NameError at runtime.
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
37
1
"""Abstract base class for CLI subcommands."""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


# Fix: the base class was the undefined name ``SCREAMING_SNAKE_CASE_`` and the
# first method's ``-> Optional[Any]`` annotation referenced unimported names,
# so creating the class raised NameError; the base is restored to ``ABC`` and
# the broken annotation removed.
class lowerCAmelCase_(ABC):
    """Base class for CLI subcommands.

    Concrete commands register their arguments via the static method and
    implement the instance method as the command body.

    NOTE(review): both abstract methods share the name ``UpperCAmelCase_``
    (obfuscation artifact) — the second definition shadows the first in the
    class namespace, so subclasses only need to override one name.
    """

    @staticmethod
    @abstractmethod
    def UpperCAmelCase_(__UpperCAmelCase):
        # Register this subcommand on the given ArgumentParser.
        raise NotImplementedError()

    @abstractmethod
    def UpperCAmelCase_(self) -> int:
        # Execute the command.
        raise NotImplementedError()
37
"""SageMaker multi-node data-parallel training smoke tests."""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''', '''False''')) is not True,
    reason='''Skipping test because should only be run when releasing minor transformers version''',
)
@pytest.mark.usefixtures('''sm_env''')
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_ddp.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf_dist.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.p3.16xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
        },
    ]
)
class lowerCAmelCase_(unittest.TestCase):
    """End-to-end distributed-training test parameterized per framework/script.

    Fix notes: the four methods were obfuscated to one shadowing name and
    their bodies read parameter names (``instance_count``, ``job_name``) that
    no signature declared; the upstream names are restored so ``setUp``,
    ``self.create_estimator`` and the parameterized test actually resolve.
    """

    def setUp(self) -> None:
        # Copy the PyTorch GLUE example into the test workspace before running.
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding="""utf-8""",
                check=True,
            )
        assert hasattr(self, """env""")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for ``instance_count`` instances."""
        job_name = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings: SMDataParallel unless the script does DDP itself
        distribution = (
            {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        )
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="""py36""",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        """Train on ``instance_count`` nodes and assert the expected KPIs."""
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""", """w""") as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss}, outfile)
37
1
"""Falcon model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCAmelCase = logging.get_logger(__name__)

# NOTE(review): this second assignment reuses the same obfuscated name and
# shadows the logger above (obfuscation artifact); kept as-is so any external
# references to ``_lowerCAmelCase`` keep seeing the archive map.
_lowerCAmelCase = {
    '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
    '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}


# Fix: the base class was the undefined name ``SCREAMING_SNAKE_CASE_``; the
# ``__init__`` signature declared ~17 parameters all named ``__UpperCAmelCase``
# (a SyntaxError) while the body read the real names; and the two class
# attributes / two properties were obfuscated to shadowing duplicates. The
# upstream names (``model_type``, ``keys_to_ignore_at_inference``,
# ``head_dim``, ``rotary``) are restored — the PretrainedConfig machinery and
# the Falcon modeling code read them by these names.
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for Falcon models.

    Defaults correspond to tiiuae/falcon-7b.
    """

    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Grouped-query attention: default to one KV head per attention head.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self) -> int:
        """Per-head hidden dimension."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self) -> bool:
        """Rotary position embeddings are used exactly when ALiBi is not."""
        return not self.alibi
37
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } _lowerCAmelCase = '''▁''' class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Any = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[MASK]" ,__UpperCAmelCase = None 
,**__UpperCAmelCase ,) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowerCAmelCase__ : Tuple = ( AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ,normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else mask_token ) lowerCAmelCase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : str = do_lower_case lowerCAmelCase__ : int = remove_space lowerCAmelCase__ : Tuple = keep_accents lowerCAmelCase__ : Any = vocab_file lowerCAmelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def UpperCAmelCase_ ( self ) -> Optional[int]: return len(self.sp_model ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Any: lowerCAmelCase__ : Optional[Any] = self.__dict__.copy() lowerCAmelCase__ : Optional[Any] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Union[str, Any] = {} lowerCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]: if self.remove_space: lowerCAmelCase__ : int = """ 
""".join(inputs.strip().split() ) else: lowerCAmelCase__ : str = inputs lowerCAmelCase__ : Tuple = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" ) if not self.keep_accents: lowerCAmelCase__ : Any = unicodedata.normalize("""NFKD""" ,__UpperCAmelCase ) lowerCAmelCase__ : Dict = """""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: lowerCAmelCase__ : Tuple = outputs.lower() return outputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) lowerCAmelCase__ : str = [] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): lowerCAmelCase__ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase ,"""""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase__ : str = cur_pieces[1:] else: lowerCAmelCase__ : int = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: return self.sp_model.IdToPiece(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : str = [] lowerCAmelCase__ : Tuple = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : List[str] = [] else: current_sub_tokens.append(__UpperCAmelCase ) 
lowerCAmelCase__ : Optional[Any] = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]: lowerCAmelCase__ : int = [self.sep_token_id] lowerCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]: lowerCAmelCase__ : List[str] = [self.sep_token_id] lowerCAmelCase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : int = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
37
1
'''simple docstring'''
# Optional "rich" integration: importing this module installs rich's pretty
# traceback handler process-wide, or fails fast when rich is not installed.
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    # show_locals=False keeps tracebacks compact (no local-variable dumps).
    install(show_locals=False)
else:
    # Importing the extension without rich present is a hard error by design.
    raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
37
'''simple docstring''' import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } _lowerCAmelCase = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } _lowerCAmelCase = { '''ctrl''': 256, } _lowerCAmelCase = { '''Pregnancy''': 16_8629, '''Christianity''': 7675, '''Explain''': 10_6423, '''Fitness''': 6_3440, '''Saving''': 6_3163, '''Ask''': 2_7171, '''Ass''': 9_5985, '''Joke''': 16_3509, '''Questions''': 4_5622, '''Thoughts''': 4_9605, '''Retail''': 5_2342, '''Feminism''': 16_4338, '''Writing''': 1_1992, '''Atheism''': 19_2263, '''Netflix''': 4_8616, '''Computing''': 3_9639, '''Opinion''': 4_3213, '''Alone''': 4_4967, '''Funny''': 5_8917, '''Gaming''': 4_0358, '''Human''': 4088, '''India''': 1331, '''Joker''': 7_7138, '''Diet''': 3_6206, '''Legal''': 1_1859, '''Norman''': 4939, '''Tip''': 7_2689, '''Weight''': 5_2343, '''Movies''': 4_6273, '''Running''': 2_3425, '''Science''': 2090, '''Horror''': 3_7793, '''Confession''': 6_0572, '''Finance''': 1_2250, '''Politics''': 1_6360, '''Scary''': 19_1985, '''Support''': 1_2654, '''Technologies''': 3_2516, '''Teenage''': 6_6160, '''Event''': 3_2769, '''Learned''': 6_7460, '''Notion''': 18_2770, '''Wikipedia''': 3_7583, '''Books''': 6665, '''Extract''': 7_6050, '''Confessions''': 10_2701, '''Conspiracy''': 7_5932, '''Links''': 6_3674, '''Narcissus''': 15_0425, '''Relationship''': 5_4766, '''Relationships''': 13_4796, '''Reviews''': 4_1671, '''News''': 4256, '''Translation''': 2_6820, '''multilingual''': 12_8406, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] 
= set() lowerCAmelCase__ : str = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : List[Any] = char lowerCAmelCase__ : Optional[Any] = set(UpperCamelCase ) return pairs class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Any = CONTROL_CODES def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,**__UpperCAmelCase ) -> Optional[int]: super().__init__(unk_token=__UpperCAmelCase ,**__UpperCAmelCase ) with open(__UpperCAmelCase ,encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase__ : int = json.load(__UpperCAmelCase ) lowerCAmelCase__ : Any = {v: k for k, v in self.encoder.items()} with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle: lowerCAmelCase__ : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1] lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges] lowerCAmelCase__ : int = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ : List[Any] = {} @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return len(self.encoder ) def UpperCAmelCase_ ( self ) -> Optional[Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: if token in self.cache: return self.cache[token] lowerCAmelCase__ : Optional[Any] = tuple(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowerCAmelCase__ : Any = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: lowerCAmelCase__ : List[str] = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = bigram lowerCAmelCase__ : List[str] = [] 
lowerCAmelCase__ : Dict = 0 while i < len(__UpperCAmelCase ): try: lowerCAmelCase__ : Optional[Any] = word.index(__UpperCAmelCase ,__UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : Dict = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : Tuple = tuple(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = new_word if len(__UpperCAmelCase ) == 1: break else: lowerCAmelCase__ : Dict = get_pairs(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = """@@ """.join(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = word[:-4] lowerCAmelCase__ : Optional[Any] = word return word def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Optional[int] = [] lowerCAmelCase__ : Any = re.findall(R"""\S+\n?""" ,__UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: return self.decoder.get(__UpperCAmelCase ,self.unk_token ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip() return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : List[Any] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + 
VOCAB_FILES_NAMES["""merges_file"""] ) with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__UpperCAmelCase ,ensure_ascii=__UpperCAmelCase ) + """\n""" ) lowerCAmelCase__ : int = 0 with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase__ : Dict = token_index writer.write(""" """.join(__UpperCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
37
1
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __get__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Union[str, Any]: # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("""unreadable attribute""" ) lowerCAmelCase__ : Optional[Any] = """__cached_""" + self.fget.__name__ lowerCAmelCase__ : Union[str, Any] = getattr(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) if cached is None: lowerCAmelCase__ : List[str] = self.fget(__UpperCAmelCase ) setattr(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) return cached def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[Any] = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"""invalid truth value {val!r}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if is_torch_fx_proxy(UpperCamelCase ): return True if is_torch_available(): import torch if isinstance(UpperCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCamelCase , np.ndarray ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return isinstance(UpperCamelCase , 
np.ndarray ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return _is_numpy(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" import torch return isinstance(UpperCamelCase , torch.Tensor ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return False if not is_torch_available() else _is_torch(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" import torch return isinstance(UpperCamelCase , torch.device ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return False if not is_torch_available() else _is_torch_device(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" import torch if isinstance(UpperCamelCase , UpperCamelCase ): if hasattr(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = getattr(UpperCamelCase , UpperCamelCase ) else: return False return isinstance(UpperCamelCase , torch.dtype ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" import tensorflow as tf return isinstance(UpperCamelCase , tf.Tensor ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return False if not is_tf_available() else _is_tensorflow(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCamelCase , """is_symbolic_tensor""" ): return tf.is_symbolic_tensor(UpperCamelCase ) return type(UpperCamelCase ) == tf.Tensor def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" import jax.numpy as jnp # noqa: F811 return 
def _is_jax(x):
    # NOTE(review): reconstructed head of this helper — its `def` line sits just
    # before this chunk; only the isinstance test is visible here.
    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Safely test whether `x` is a JAX array (False when flax/jax is unavailable)."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert TF/PyTorch/JAX tensors, numpy arrays and nested containers to plain Python objects."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert TF/PyTorch/JAX tensors and nested containers to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """Base container for model outputs: an ordered dict that also supports
    attribute access and integer/slice indexing over the non-None values.

    Subclasses are dataclasses; ``__post_init__`` copies the dataclass fields
    into the dict part, with special handling when only the first field is set
    and happens to be a (key, value) iterable.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""")

    def pop(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""")

    def update(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the non-``None`` attribute values."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        # _value2member_map_ is the Enum-internal value->member mapping; listing its
        # keys shows the caller every valid value.
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"""
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the ``padding`` argument, usable for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the ``return_tensors`` argument, usable for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for a list of context managers, entered/exited as one via an ``ExitStack``."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """Return True when `model_class`'s forward/call signature defaults ``return_loss`` to True."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Return the list of label-argument names expected by `model_class`'s forward/call."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single-level one, joining keys with `delimiter`."""
    from collections.abc import MutableMapping  # local import: only needed here

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    """Yield a fresh temporary directory when `use_temp_dir`, else `working_dir` itself."""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic transpose that works for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array)}.""")


def reshape(array, newshape):
    """Framework-agnostic reshape that works for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array)}.""")


def squeeze(array, axis=None):
    """Framework-agnostic squeeze that works for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array)}.""")


def expand_dims(array, axis):
    """Framework-agnostic expand_dims that works for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array)}.""")


def tensor_size(array):
    """Framework-agnostic element count for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array)}.""")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix each local class path in `auto_map` with ``repo_id--`` (in place) and return it."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map


def infer_framework(model_class):
    """Infer "tf"/"pt"/"flax" from `model_class`'s MRO; raise TypeError when ambiguous."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    # Fix: the original raised inside the loop's else-branch, so only the first MRO
    # entry was ever inspected; scan the whole MRO before giving up.
    raise TypeError(f"""Could not infer framework from class {model_class}.""")
37
"""Check whether a sentence is a pangram (contains every letter a-z)."""


def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if `input_str` contains every letter of the English alphabet."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a fixed 26-slot boolean flag table (one slot per letter)."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a set comprehension over the lowercased alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three pangram implementations with timeit."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowerCAmelCase = re.compile(R'''^(?P<major>\d+)''' R'''\.(?P<minor>\d+)''' R'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_: '''simple docstring''' __lowercase : str __lowercase : Optional[str] = None __lowercase : Optional[Union[str, int]] = None __lowercase : Optional[Union[str, int]] = None __lowercase : Optional[Union[str, int]] = None def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = _str_to_version_tuple(self.version_str ) def __repr__( self ) -> List[Any]: return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}""" @property def UpperCAmelCase_ ( self ) -> Optional[int]: return self.major, self.minor, self.patch def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): return Version(__UpperCAmelCase ) elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ): return other raise TypeError(F"""{other} (type {type(__UpperCAmelCase )}) cannot be compared to version.""" ) def __eq__( self ,__UpperCAmelCase ) -> str: try: lowerCAmelCase__ : Union[str, Any] = self._validate_operand(__UpperCAmelCase ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[Any] = self._validate_operand(__UpperCAmelCase ) return self.tuple < other.tuple def __hash__( self ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def UpperCAmelCase_ ( cls ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : List[str] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def UpperCAmelCase_ ( self ) -> str: return self.version_str def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" 
lowerCAmelCase__ : Any = _VERSION_REG.match(UpperCamelCase ) if not res: raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" ) return tuple(int(UpperCamelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return ".".join(str(UpperCamelCase ) for v in version_tuple )
37
"""Three ways to sum the decimal digits of an integer, plus a benchmark."""


def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of `n` (sign ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum (sign ignored)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Compare the three implementations with timeit on increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup="""import __main__""")
        print(f"""{call:56} = {func(value)} -- {timing:.4f} seconds""")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
"""Character-level SentencePiece tokenizer for SpeechT5."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """SpeechT5 tokenizer backed by a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode a token sequence back to a string, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        # Only a single trailing </s> is added (see build_inputs_with_special_tokens).
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Vocab file is gone (e.g. loaded from a serialized model): dump the proto instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
37
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# Silence TensorFlow's C++ logging before TF is imported (3 = errors only).
# Fix: the assignment target was lost in the original ('3' was bound to a dead variable,
# leaving `import os` unused).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
37
1
"""Tests for the PyTorch SwiftFormer model."""
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    """Builds small SwiftFormer configs/inputs and checks output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        # Also check the unlabeled forward pass.
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)

            # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
37
"""XLM-RoBERTa-XL model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_lowerCAmelCase = logging.get_logger(__name__)

# NOTE(review): this rebinds the same module-level name used for the logger
# above (a mangling artifact); the logger is unused afterwards, so behavior
# is unaffected, but the two objects share one name.
_lowerCAmelCase = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class lowerCAmelCase_(SCREAMING_SNAKE_CASE_):
    """Configuration for an XLM-RoBERTa-XL model.

    Stores the hyper-parameters of the model; all arguments default to the
    values of the ``facebook/xlm-roberta-xl`` checkpoint.
    """

    # Restored from the mangled ``__lowercase`` attribute; PretrainedConfig
    # subclasses identify themselves through ``model_type``.
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        """Build the config; unknown keyword arguments are forwarded to the base class.

        The original (damaged) source declared every parameter with the same
        name, which is a SyntaxError; the names here are restored from the
        attribute assignments in the body.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class lowerCAmelCase_(SCREAMING_SNAKE_CASE_):
    """ONNX export configuration for XLM-RoBERTa-XL.

    NOTE(review): this class shadows the config class above because the
    mangled source gave both the same name; preserved as-is.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input axes; name restored to match the OnnxConfig API."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
37
1
"""Directed and undirected (weighted) graphs backed by adjacency lists.

Each edge is stored as a two-element list ``[weight, destination]``. The
damaged original collapsed the file onto a few lines and gave every method
the same mangled name (so each ``def`` overrode the previous one); the
method names below are restored, with backward-compat aliases for the one
surviving mangled name.
"""
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """A directed graph; ``self.graph`` maps vertex -> list of [weight, dest]."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add a directed edge u -> v with weight w; duplicates are ignored."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the destination vertex also exists as a key
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every vertex currently in the graph."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove every edge u -> v (weight is ignored)."""
        if self.graph.get(u):
            # iterate over a copy: removing from the list being iterated
            # would silently skip elements
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s; stops early if d is reached.

        s == -2 means "start at the first vertex"; d == -1 means "no target".
        Returns the list of visited vertices in visit order.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated node
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from s; returns vertices in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Count edges pointing at u."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Count edges leaving u."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological ordering starting from s.

        NOTE(review): assumes the graph is acyclic — TODO confirm callers
        never pass cyclic graphs.
        """
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # a vertex is emitted once all of its children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the vertices that participate in a cycle (DFS with back-edge scan)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited, non-parent vertex seen on the way down marks a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if the graph contains a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        # NOTE: mirrors the original control flow — the scan
                        # returns True on the first non-matching stack entry.
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin

    # Backward-compat: in the damaged original every method shared one mangled
    # name, which resolved to the last definition (bfs_time).
    UpperCAmelCase_ = bfs_time


class Graph:
    """An undirected graph; every edge is stored in both directions."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add an undirected edge u <-> v with weight w; duplicates are ignored."""
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge, do nothing
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # u does not exist yet
            self.graph[u] = [[w, v]]
        # add the other direction
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge u <-> v in both directions (weight is ignored)."""
        if self.graph.get(u):
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in list(self.graph[v]):
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s; stops early if d is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from s; returns vertices in visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to u."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the vertices that participate in a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if the graph contains a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        # NOTE: mirrors the original control flow — the scan
                        # returns True on the first non-matching stack entry.
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every vertex currently in the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin

    # Backward-compat alias (see note on DirectedGraph).
    UpperCAmelCase_ = bfs_time


# Backward-compat: the damaged original bound both classes to this single
# mangled name; the binding resolved to the undirected graph defined last.
lowerCAmelCase_ = Graph
37
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[str] = analyze_text(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase__ : List[Any] = sum(single_char_strings.values() ) # one length string lowerCAmelCase__ : Optional[int] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase__ : List[Any] = single_char_strings[ch] lowerCAmelCase__ : List[Any] = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase__ : Dict = sum(two_char_strings.values() ) lowerCAmelCase__ : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase__ : Union[str, Any] = cha + cha if sequence in two_char_strings: lowerCAmelCase__ : Dict = two_char_strings[sequence] lowerCAmelCase__ : Tuple = int(UpperCamelCase ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = Counter() # type: ignore lowerCAmelCase__ : Tuple = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. 
two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
37
1
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Dict = ConsistencyModelPipeline __lowercase : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __lowercase : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __lowercase : List[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) @property def UpperCAmelCase_ ( self ) -> List[str]: lowerCAmelCase__ : str = UNetaDModel.from_pretrained( """diffusers/consistency-models-test""" ,subfolder="""test_unet""" ,) return unet @property def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained( """diffusers/consistency-models-test""" ,subfolder="""test_unet_class_cond""" ,) return unet def UpperCAmelCase_ ( self ,__UpperCAmelCase=False ) -> int: if class_cond: lowerCAmelCase__ : Optional[Any] = self.dummy_cond_unet else: lowerCAmelCase__ : List[Any] = self.dummy_uncond_unet # Default to CM multistep sampler lowerCAmelCase__ : str = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,) lowerCAmelCase__ : Dict = { """unet""": unet, """scheduler""": scheduler, } return components def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> 
Optional[Any]: if str(__UpperCAmelCase ).startswith("""mps""" ): lowerCAmelCase__ : str = torch.manual_seed(__UpperCAmelCase ) else: lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = { """batch_size""": 1, """num_inference_steps""": None, """timesteps""": [22, 0], """generator""": generator, """output_type""": """np""", } return inputs def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Tuple = self.get_dummy_components() lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1] lowerCAmelCase__ : List[Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : str = self.get_dummy_components(class_cond=__UpperCAmelCase ) lowerCAmelCase__ : Dict = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ : int = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ : int = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ : int = 0 lowerCAmelCase__ : Optional[Any] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ : int = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 
0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self ) -> List[str]: lowerCAmelCase__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Any = self.get_dummy_components() lowerCAmelCase__ : Dict = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ : int = 1 lowerCAmelCase__ : Optional[int] = None lowerCAmelCase__ : Union[str, Any] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : Any = self.get_dummy_components(class_cond=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = ConsistencyModelPipeline(**__UpperCAmelCase ) lowerCAmelCase__ : Tuple = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = 1 lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : Optional[Any] = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] lowerCAmelCase__ : Union[str, Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow 
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration tests for `ConsistencyModelPipeline` against the
    `diffusers/consistency_models` imagenet64 checkpoint.

    NOTE(review): identifiers in this class were machine-mangled (all methods shared one
    name, signatures repeated the same parameter name, and `torch.floataa` does not
    exist). Names and a few erased literal arguments were reconstructed from the
    visible call sites and the upstream diffusers test file — verify against history.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Build the common pipeline kwargs; optionally pin `latents` for determinism."""
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            inputs["latents"] = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Deterministic latents for a given seed/device/dtype/shape."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        # Single-step distillation mode: one inference step, no explicit timesteps.
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a  # presumably the obfuscated `require_torch_2` — import not visible here
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        # NOTE(review): flag values reconstructed from upstream — TODO confirm.
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a  # presumably the obfuscated `require_torch_2` — import not visible here
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
37
"""Convert an original (CompVis-style) LDM UNet checkpoint into diffusers format.

NOTE(review): every function in this file had been machine-renamed to
`_SCREAMING_SNAKE_CASE` with duplicated parameter names (a SyntaxError); the real
names are restored here from the in-body call sites (`shave_segments`,
`renew_resnet_paths`, `renew_attention_paths`).
"""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Drop `n_shave_prefix_segments` dotted segments from the start of `path`
    (or from the end, when the count is negative)."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map original resnet parameter keys to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input key.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map original attention parameter keys to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input key.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Apply a list of old→new key mappings to `checkpoint`, reading values from
    `old_checkpoint`.

    Optionally splits fused qkv attention tensors (``attention_paths_to_split``)
    into separate query/key/value entries, applies extra string replacements, and
    converts ``proj_attn.weight`` from conv-1D to linear shape.

    NOTE(review): local/parameter names restored from the upstream diffusers
    conversion script — verify against project history.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original LDM UNet state dict into a diffusers-format state dict.

    `config` must provide at least ``num_res_blocks`` and ``num_head_channels``.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # Downsampler block: only a strided conv, no resnet/attention conversion needed.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        # If scheduler/VQ-VAE configs sit next to the checkpoint, save a full LDM pipeline;
        # otherwise fall back to saving just the UNet.
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
37
1
"""JSON reader/writer for `datasets` (Dataset <-> JSON files/buffers).

NOTE(review): both classes had been machine-renamed to the same identifier
(the second shadowed the first) and every signature repeated one parameter name
(a SyntaxError). Names are restored from the in-body call sites
(`self._write`, `self._batch_json`) and the imported `AbstractDatasetReader` base.
"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Read a dataset from JSON file(s) via the packaged `Json` builder."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # `field` selects the JSON key that holds the records (for nested files).
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style depending on `self.streaming`)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Write a `Dataset` to a JSON file/buffer, optionally with multiprocessing."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Serialize the dataset; returns the number of bytes written."""
        # `path_or_buf` is handled by this class, not forwarded to pandas.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    """ was passed. Please provide a local path instead."""
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Encode one batch of rows as JSON bytes (single tuple arg for `pool.imap`)."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Write batches to `file_obj`, sequentially or via a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
37
"""Prime-number utilities (primelib-style helpers).

NOTE(review): every function had been machine-renamed to `_SCREAMING_SNAKE_CASE`
with duplicated parameter names (a SyntaxError); names are restored from the
in-body references. Input validation deliberately keeps the original
assert-based style.
"""
from math import sqrt


def is_prime(number):
    """Return True iff `number` is prime (0 and 1 are not prime)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: all primes from 2 up to `n` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """All primes from 2 up to `n` (inclusive), by trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Prime factorization of `number` as a list (0 and 1 map to [number])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    factor = 2  # potential prime number factors.
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # integer division keeps `quotient` an exact int (was `/=`,
                # which drifted into float arithmetic)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Largest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Smallest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True iff `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """Return True iff `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Return two primes whose sum is the even `number` (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Greatest common divisor via Euclid's algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Least common multiple ("kgV") built from the prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """All primes strictly between the primes `p_number_1` and `p_number_2`."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    # NOTE(review): for consecutive primes `ans` is empty and this assert raises
    # IndexError — preserved from the original behavior.
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """All positive divisors of `n`, ascending (1 and n included)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """True iff `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduce numerator/denominator by their gcd; returns the reduced tuple."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Iterative factorial of `n`.

    NOTE(review): the original definition was truncated at the end of this chunk
    right after `ans = 1`; the loop below is the standard completion — confirm
    against the full file.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
for factor in range(1 , n + 1 ): ans *= factor return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase__ : List[Any] = 0 lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Optional[Any] = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase__ : Dict = ans ans += fiba lowerCAmelCase__ : str = tmp return ans
37
1
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Any = 0 if start < end: lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : int = a[end] lowerCAmelCase__ : Union[str, Any] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 ) count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase ) return count def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : Optional[int] = randint(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[int] = a[end] lowerCAmelCase__ : Union[str, Any] = a[pivot] lowerCAmelCase__ : List[str] = temp lowerCAmelCase__ : Dict = start - 1 for index in range(UpperCamelCase , UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ : Tuple = new_pivot_index + 1 lowerCAmelCase__ : str = a[new_pivot_index] lowerCAmelCase__ : Dict = a[index] lowerCAmelCase__ : Dict = temp lowerCAmelCase__ : Union[str, Any] = a[new_pivot_index + 1] lowerCAmelCase__ : Any = a[end] lowerCAmelCase__ : Optional[int] = temp return new_pivot_index + 1, count _lowerCAmelCase = TemporaryFile() _lowerCAmelCase = 100 # 1000 elements are to be sorted _lowerCAmelCase , _lowerCAmelCase = 0, 1 # mean and standard deviation _lowerCAmelCase = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array _lowerCAmelCase = np.load(outfile) _lowerCAmelCase = len(M) - 1 _lowerCAmelCase = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons 
for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
37
"""The SuperGLUE benchmark metric."""

import datasets
from sklearn.metrics import f1_score, matthews_corrcoef

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        'exact_match': Exact match between answer and gold answer
        'f1': F1 score
    - for 'multirc':
        'exact_match': Exact match between answer and gold answer
        'f1_m': Per-question macro-F1 score
        'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        'accuracy': Accuracy
        'f1': F1 score
    - for all others:
        'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of exact matches between predictions and labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Accuracy together with F1 (``f1_avg`` selects sklearn's averaging)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute exact match, per-question macro-F1 and answer-level F1
    for MultiRC predictions."""
    # group (prediction, label) pairs by their paragraph/question pair
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # exact match: every answer of the question predicted correctly
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):  # NOTE(review): obfuscated class name kept — upstream names it ``SuperGlue``; confirm before renaming
    def _info(self):
        """Validate the configuration name and describe the metric."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature schema of predictions/references for the configured subset."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        """Dispatch to the scorer matching the configured subset."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
37
1
"""
Highest Response Ratio Next (HRRN) scheduling.

A non-preemptive discipline that, at every decision point, runs the
waiting process with the highest response ratio (w + s) / s, where w is
the time the process has waited and s is its burst time.
"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(process_name, arrival_time, burst_time, no_of_process):
    """Return the turn-around time of each process under HRRN.

    Args:
        process_name: names of the processes.
        arrival_time: arrival time of each process (sorted in place).
        burst_time: CPU burst of each process.
        no_of_process: number of processes.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # advance to the first process that has not finished yet
        i = 0
        while finished_process[i] == 1:
            i += 1

        # CPU idles until the next process arrives
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(process_name, turn_around_time, burst_time, no_of_process):
    """Return each process's waiting time: turn-around time minus burst."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
37
"""Tests for ``transformers.convert_graph_to_onnx`` (graph export helpers)."""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Dummy model whose forward() declares all exported inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy model whose forward() interleaves a non-exported argument."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export ``model`` to ONNX in a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        """Check inferred input/output names and dynamic-axis mappings."""
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        """Check argument filtering/reordering against the model signature."""
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
37
1
"""Borůvka's minimum-spanning-tree algorithm on an undirected weighted graph."""
from __future__ import annotations

from typing import Any


class lowerCAmelCase_:  # NOTE(review): obfuscated class name kept — no visible caller to confirm the intended name (upstream uses ``Graph``)
    def __init__(self, num_of_nodes: int) -> None:
        """Arguments:
        num_of_nodes - the number of nodes in the graph.
        """
        self.m_num_of_nodes = num_of_nodes
        # edges stored as [u, v, weight] triples
        self.m_edges: list[list[int]] = []
        # node -> representative of its component
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge between u_node and v_node with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Follow parent links until the root of u_node's component is found."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Flatten every node's component entry to its root after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of u_node and v_node, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Run Borůvka's algorithm, printing every added edge and, at the
        end, the total weight of the minimum spanning tree."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # record the cheapest edge leaving each component
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # add every recorded edge that still joins two distinct components
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def _SCREAMING_SNAKE_CASE():
    """Placeholder kept for the module's doctest driver (the original
    docstring with doctests was stripped by obfuscation)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
'''simple docstring''' from maths.prime_factors import prime_factors def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
37
1
'''simple docstring''' from maths.prime_factors import prime_factors def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
37
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''AI-Sweden/gpt-sw3-126m''': 2048, '''AI-Sweden/gpt-sw3-350m''': 2048, '''AI-Sweden/gpt-sw3-1.6b''': 2048, '''AI-Sweden/gpt-sw3-6.7b''': 2048, '''AI-Sweden/gpt-sw3-20b''': 2048, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None: lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" ) if 
name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase__ : Tuple = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token else: lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = do_lower_case lowerCAmelCase__ : Dict = remove_space lowerCAmelCase__ : Optional[Any] = keep_accents lowerCAmelCase__ : int = vocab_file lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase__ : List[str] = re.compile( F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) -> Any: lowerCAmelCase__ : int = self.__dict__.copy() lowerCAmelCase__ : Optional[int] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCAmelCase_ ( self ) -> int: return len(self.sp_model ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase ) # Normalize whitespaces lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase ) return text def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase ) return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.IdToPiece(__UpperCAmelCase ) @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> str: return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : int = [] lowerCAmelCase__ : Optional[int] = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using 
sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Optional[Any] = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ : Any = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string def UpperCAmelCase_ ( self ) -> Dict[str, int]: lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : str = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase ) else: lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text] lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase ) if return_tensors is True or return_tensors == 
"pt": lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase ) return token_ids def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.decode(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]: lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] lowerCAmelCase__ : Any = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=__UpperCAmelCase )
37
1
"""Processor tests for SAM (Segment Anything Model).

NOTE(review): this file has been machine-obfuscated. All classes are named
``lowerCAmelCase_`` and all methods ``UpperCAmelCase_`` (later defs shadow
earlier ones within a class), assignment targets were rewritten to
``lowerCAmelCase__`` while later reads still use the original names
(``processor``, ``image_inputs`` ...), and argument references became
``__UpperCAmelCase``. The tests therefore cannot run as written — restore the
original identifiers before relying on this suite. Code tokens are preserved
verbatim below; only comments/docstrings were added.
"""
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


# Optional backends: PIL/transformers vision classes, torch and tf are only
# imported when available so the module can at least be collected without them.
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class lowerCAmelCase_(unittest.TestCase):
    """SamProcessor tests against the PyTorch backend."""

    def UpperCAmelCase_(self) -> List[str]:
        # setUp: save a default SamProcessor into a temp dir for reload tests.
        # NOTE(review): obfuscation dropped the ``self.tmpdirname``/``processor``
        # bindings — the following reads reference names never assigned here.
        lowerCAmelCase__ : int = tempfile.mkdtemp()
        lowerCAmelCase__ : Union[str, Any] = SamImageProcessor()
        lowerCAmelCase__ : int = SamProcessor(__UpperCAmelCase)
        processor.save_pretrained(self.tmpdirname)

    def UpperCAmelCase_(self, **__UpperCAmelCase) -> int:
        # Helper: reload the image processor from the temp dir, forwarding kwargs.
        return AutoProcessor.from_pretrained(self.tmpdirname, **__UpperCAmelCase).image_processor

    def UpperCAmelCase_(self) -> Tuple:
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname)

    def UpperCAmelCase_(self) -> Any:
        # Helper: one random 30x400 RGB PIL image (channel-first array moved to
        # channel-last for PIL). ``np.uinta`` is a garbled ``np.uint8``.
        lowerCAmelCase__ : Dict = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        lowerCAmelCase__ : Optional[int] = [Image.fromarray(np.moveaxis(__UpperCAmelCase, 0, -1)) for x in image_inputs]
        return image_inputs

    def UpperCAmelCase_(self) -> int:
        # Save then reload the processor with extra kwargs; the reloaded image
        # processor must carry the overridden values.
        lowerCAmelCase__ : List[str] = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        lowerCAmelCase__ : List[str] = self.get_image_processor(do_normalize=__UpperCAmelCase, padding_value=1.0)
        lowerCAmelCase__ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__UpperCAmelCase, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, __UpperCAmelCase)

    def UpperCAmelCase_(self) -> List[str]:
        # The processor must produce the same numpy features as the raw image
        # processor (up to the keys the processor strips).
        lowerCAmelCase__ : Dict = self.get_image_processor()
        lowerCAmelCase__ : List[str] = SamProcessor(image_processor=__UpperCAmelCase)
        lowerCAmelCase__ : int = self.prepare_image_inputs()
        lowerCAmelCase__ : List[Any] = image_processor(__UpperCAmelCase, return_tensors="""np""")
        lowerCAmelCase__ : int = processor(images=__UpperCAmelCase, return_tensors="""np""")
        input_feat_extract.pop("""original_sizes""")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_torch
    def UpperCAmelCase_(self) -> List[Any]:
        # post_process_masks must upscale (1, 3, 5, 5) dummy masks to the
        # original image size for list, torch-tensor and numpy inputs, and
        # raise on malformed size inputs.
        lowerCAmelCase__ : List[str] = self.get_image_processor()
        lowerCAmelCase__ : Optional[Any] = SamProcessor(image_processor=__UpperCAmelCase)
        lowerCAmelCase__ : int = [torch.ones((1, 3, 5, 5))]
        lowerCAmelCase__ : Union[str, Any] = [[1764, 2646]]
        lowerCAmelCase__ : List[Any] = [[683, 1024]]
        lowerCAmelCase__ : List[str] = processor.post_process_masks(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        lowerCAmelCase__ : int = processor.post_process_masks(
            __UpperCAmelCase, torch.tensor(__UpperCAmelCase), torch.tensor(__UpperCAmelCase))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        lowerCAmelCase__ : List[Any] = [np.ones((1, 3, 5, 5))]
        lowerCAmelCase__ : Dict = processor.post_process_masks(__UpperCAmelCase, np.array(__UpperCAmelCase), np.array(__UpperCAmelCase))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        lowerCAmelCase__ : int = [[1, 0], [0, 1]]
        with self.assertRaises(__UpperCAmelCase):
            lowerCAmelCase__ : List[str] = processor.post_process_masks(__UpperCAmelCase, np.array(__UpperCAmelCase), np.array(__UpperCAmelCase))


@require_vision
@require_tf
class lowerCAmelCase_(unittest.TestCase):
    """SamProcessor tests against the TensorFlow backend (mirrors the torch class)."""

    def UpperCAmelCase_(self) -> Union[str, Any]:
        # setUp — same obfuscation caveat as the torch class above.
        lowerCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
        lowerCAmelCase__ : Optional[Any] = SamImageProcessor()
        lowerCAmelCase__ : Optional[int] = SamProcessor(__UpperCAmelCase)
        processor.save_pretrained(self.tmpdirname)

    def UpperCAmelCase_(self, **__UpperCAmelCase) -> List[Any]:
        # Helper: reload the image processor from the temp dir.
        return AutoProcessor.from_pretrained(self.tmpdirname, **__UpperCAmelCase).image_processor

    def UpperCAmelCase_(self) -> Dict:
        # tearDown
        shutil.rmtree(self.tmpdirname)

    def UpperCAmelCase_(self) -> Union[str, Any]:
        # Helper: one random RGB PIL image.
        lowerCAmelCase__ : str = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        lowerCAmelCase__ : Optional[Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase, 0, -1)) for x in image_inputs]
        return image_inputs

    def UpperCAmelCase_(self) -> Optional[int]:
        # Save/reload round-trip with kwarg overrides.
        lowerCAmelCase__ : Union[str, Any] = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        lowerCAmelCase__ : Dict = self.get_image_processor(do_normalize=__UpperCAmelCase, padding_value=1.0)
        lowerCAmelCase__ : List[Any] = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=__UpperCAmelCase, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, __UpperCAmelCase)

    def UpperCAmelCase_(self) -> List[str]:
        # Processor vs. raw image processor feature equivalence (numpy tensors).
        lowerCAmelCase__ : Tuple = self.get_image_processor()
        lowerCAmelCase__ : int = SamProcessor(image_processor=__UpperCAmelCase)
        lowerCAmelCase__ : int = self.prepare_image_inputs()
        lowerCAmelCase__ : Any = image_processor(__UpperCAmelCase, return_tensors="""np""")
        lowerCAmelCase__ : Union[str, Any] = processor(images=__UpperCAmelCase, return_tensors="""np""")
        input_feat_extract.pop("""original_sizes""")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_tf
    def UpperCAmelCase_(self) -> Any:
        # post_process_masks with return_tensors="tf" for list, tf-tensor and
        # numpy inputs; malformed sizes must raise InvalidArgumentError.
        lowerCAmelCase__ : Union[str, Any] = self.get_image_processor()
        lowerCAmelCase__ : Optional[int] = SamProcessor(image_processor=__UpperCAmelCase)
        lowerCAmelCase__ : Optional[Any] = [tf.ones((1, 3, 5, 5))]
        lowerCAmelCase__ : str = [[1764, 2646]]
        lowerCAmelCase__ : Optional[Any] = [[683, 1024]]
        lowerCAmelCase__ : Optional[Any] = processor.post_process_masks(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, return_tensors="""tf""")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        lowerCAmelCase__ : Union[str, Any] = processor.post_process_masks(
            __UpperCAmelCase, tf.convert_to_tensor(__UpperCAmelCase), tf.convert_to_tensor(__UpperCAmelCase), return_tensors="""tf""",)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        lowerCAmelCase__ : Union[str, Any] = [np.ones((1, 3, 5, 5))]
        lowerCAmelCase__ : Any = processor.post_process_masks(
            __UpperCAmelCase, np.array(__UpperCAmelCase), np.array(__UpperCAmelCase), return_tensors="""tf""")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        lowerCAmelCase__ : List[Any] = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            lowerCAmelCase__ : List[Any] = processor.post_process_masks(
                __UpperCAmelCase, np.array(__UpperCAmelCase), np.array(__UpperCAmelCase), return_tensors="""tf""")


@require_vision
@require_torchvision
class lowerCAmelCase_(unittest.TestCase):
    """Cross-framework equivalence tests: torch and tf paths must agree."""

    def UpperCAmelCase_(self) -> int:
        # setUp — same obfuscation caveat as above.
        lowerCAmelCase__ : Any = tempfile.mkdtemp()
        lowerCAmelCase__ : Optional[Any] = SamImageProcessor()
        lowerCAmelCase__ : Tuple = SamProcessor(__UpperCAmelCase)
        processor.save_pretrained(self.tmpdirname)

    def UpperCAmelCase_(self, **__UpperCAmelCase) -> List[Any]:
        # Helper: reload the image processor from the temp dir.
        return AutoProcessor.from_pretrained(self.tmpdirname, **__UpperCAmelCase).image_processor

    def UpperCAmelCase_(self) -> Optional[Any]:
        # tearDown
        shutil.rmtree(self.tmpdirname)

    def UpperCAmelCase_(self) -> str:
        # Helper: one random RGB PIL image.
        lowerCAmelCase__ : Optional[Any] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        lowerCAmelCase__ : Any = [Image.fromarray(np.moveaxis(__UpperCAmelCase, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def UpperCAmelCase_(self) -> Any:
        # The same dummy mask post-processed via tf and via pt must be
        # numerically identical. ``np.floataa`` is a garbled ``np.float32``.
        lowerCAmelCase__ : Any = self.get_image_processor()
        lowerCAmelCase__ : List[Any] = SamProcessor(image_processor=__UpperCAmelCase)
        lowerCAmelCase__ : Dict = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.floataa)
        lowerCAmelCase__ : int = [tf.convert_to_tensor(__UpperCAmelCase)]
        lowerCAmelCase__ : Dict = [torch.tensor(__UpperCAmelCase)]
        lowerCAmelCase__ : Optional[int] = [[1764, 2646]]
        lowerCAmelCase__ : Dict = [[683, 1024]]
        lowerCAmelCase__ : Optional[Any] = processor.post_process_masks(
            __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, return_tensors="""tf""")
        lowerCAmelCase__ : List[str] = processor.post_process_masks(
            __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, return_tensors="""pt""")
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def UpperCAmelCase_(self) -> List[Any]:
        # pixel_values must match across pt/tf and across processor vs. raw
        # image processor.
        lowerCAmelCase__ : Tuple = self.get_image_processor()
        lowerCAmelCase__ : Tuple = SamProcessor(image_processor=__UpperCAmelCase)
        lowerCAmelCase__ : int = self.prepare_image_inputs()
        lowerCAmelCase__ : Union[str, Any] = image_processor(__UpperCAmelCase, return_tensors="""pt""")["""pixel_values"""].numpy()
        lowerCAmelCase__ : Dict = processor(images=__UpperCAmelCase, return_tensors="""pt""")["""pixel_values"""].numpy()
        lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase, return_tensors="""tf""")["""pixel_values"""].numpy()
        lowerCAmelCase__ : int = processor(images=__UpperCAmelCase, return_tensors="""tf""")["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(__UpperCAmelCase, __UpperCAmelCase))
        self.assertTrue(np.allclose(__UpperCAmelCase, __UpperCAmelCase))
        self.assertTrue(np.allclose(__UpperCAmelCase, __UpperCAmelCase))
37
"""Slow integration tests for XLM-RoBERTa: compare real checkpoints against
reference activations extracted from the original fairseq models.

NOTE(review): machine-obfuscated file — both test methods share the name
``UpperCAmelCase_`` (the second def shadows the first), local bindings were
renamed to ``lowerCAmelCase__`` while later reads use the original names
(``model``, ``output``), and digits in float literals were underscored
(``0.0_1_0_1`` == ``0.0101``). Restore the original identifiers before
relying on these tests. Code tokens are preserved verbatim; only
comments/docstrings were added.
"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Numerical regression tests for xlm-roberta-base and xlm-roberta-large."""

    @slow
    def UpperCAmelCase_(self) -> Union[str, Any]:
        # xlm-roberta-base: hidden size 768; expected values were dumped from
        # the fairseq reference implementation (see commented commands below).
        lowerCAmelCase__ : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : str = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Dict = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, __UpperCAmelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], __UpperCAmelCase, atol=1E-3))

    @slow
    def UpperCAmelCase_(self) -> int:
        # xlm-roberta-large: hidden size 1024; same structure as the base test.
        lowerCAmelCase__ : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        lowerCAmelCase__ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : Dict = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, __UpperCAmelCase)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], __UpperCAmelCase, atol=1E-3))
37
1
"""Net present value (NPV) of a series of periodic cash flows."""

# Exported explicitly so the leading-underscore name survives `from module import *`
# (e.g. for the test harness); callers normally import it by name.
__all__ = ["_SCREAMING_SNAKE_CASE"]


def _SCREAMING_SNAKE_CASE(discount_rate, cash_flows):
    """Return the net present value of *cash_flows* at *discount_rate*.

    The cash flow at index ``i`` is discounted by ``(1 + discount_rate) ** i``
    (so index 0 is taken at face value) and the discounted flows are summed.

    Bug fix: the obfuscated original declared both parameters as
    ``UpperCamelCase`` (a duplicate-argument SyntaxError), left the names the
    body reads (``discount_rate``, ``cash_flows``) unbound, and rounded a
    parameter instead of the computed sum. Parameters are restored to the
    names the body actually uses and the computed total is rounded.

    Args:
        discount_rate: Per-period discount rate; must be >= 0.
        cash_flows: Non-empty sequence of per-period cash flows.

    Returns:
        float: The NPV rounded to two decimal places.

    Raises:
        ValueError: If ``discount_rate`` is negative or ``cash_flows`` is empty.

    >>> _SCREAMING_SNAKE_CASE(0.1, [100, 100])
    190.91
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    # Discount each flow back to t=0 and accumulate.
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
"""Load Flax-serialized model weights into a PyTorch model.

NOTE(review): machine-obfuscated file — both functions are named
``_SCREAMING_SNAKE_CASE`` (the second def shadows the first), every
parameter became ``UpperCamelCase``, and assignment targets became
``lowerCAmelCase__`` while later reads use the original names
(``flax_state_dict``, ``pt_model_dict``, ...). As written this module cannot
execute; restore the original identifiers first. Code tokens are preserved
verbatim below; only comments/docstrings were added.
"""
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


_lowerCAmelCase = logging.get_logger(__name__)


def _SCREAMING_SNAKE_CASE(UpperCamelCase, UpperCamelCase):
    """Deserialize a Flax msgpack checkpoint file and load it into a PyTorch model.

    Presumably (model_file, flax_state-template) — TODO confirm original
    parameter names. On an UnpicklingError, re-reads the file as text to
    detect a git-lfs pointer stub ("version...") and raises a helpful OSError.
    """
    try:
        with open(UpperCamelCase, """rb""") as flax_state_f:
            lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(UpperCamelCase) as f:
                # A git-lfs pointer file starts with "version" — the real
                # binary payload was never downloaded.
                if f.read().startswith("""version"""):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """)
    return load_flax_weights_in_pytorch_model(UpperCamelCase, UpperCamelCase)


def _SCREAMING_SNAKE_CASE(UpperCamelCase, UpperCamelCase):
    """Copy a (nested) Flax params tree into a PyTorch model's state dict.

    Handles the standard Flax->PT key/layout conversions: 4-D ``kernel`` ->
    ``weight`` transposed (3, 2, 0, 1) (conv), 2-D ``kernel`` -> ``weight``
    transposed (dense), ``scale`` -> ``weight`` (norm layers), and ``_N`` ->
    ``.N`` sub-module renumbering. Logs unexpected/missing keys.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""")
        raise
    # check if we have bf16 weights
    lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase: x.dtype == jnp.bfloataa, UpperCamelCase)).values()
    if any(UpperCamelCase):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""")
        lowerCAmelCase__ : Dict = jax.tree_util.tree_map(
            lambda UpperCamelCase: params.astype(np.floataa) if params.dtype == jnp.bfloataa else params, UpperCamelCase)
    lowerCAmelCase__ : Any = """"""
    lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase, sep=""".""")
    lowerCAmelCase__ : Optional[int] = pt_model.state_dict()
    # keep track of unexpected & missing keys
    lowerCAmelCase__ : Optional[Any] = []
    lowerCAmelCase__ : int = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # 4-D conv kernel: Flax HWIO -> PyTorch OIHW.
            lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
            lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # Dense kernel: transpose to PyTorch (out, in) layout.
            lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""]
            lowerCAmelCase__ : Any = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            # Norm-layer scale maps to PyTorch weight, values unchanged.
            lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            # Renumber Flax's "_N" sub-module suffixes to PyTorch's ".N".
            for i, flax_key_tuple_string in enumerate(UpperCamelCase):
                lowerCAmelCase__ : List[str] = (
                    flax_key_tuple_string.replace("""_0""", """.0""")
                    .replace("""_1""", """.1""")
                    .replace("""_2""", """.2""")
                    .replace("""_3""", """.3""")
                    .replace("""_4""", """.4""")
                    .replace("""_5""", """.5""")
                    .replace("""_6""", """.6""")
                    .replace("""_7""", """.7""")
                    .replace("""_8""", """.8""")
                    .replace("""_9""", """.9""")
                )
        lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""")
            else:
                # add weight to pytorch dict
                lowerCAmelCase__ : int = np.asarray(UpperCamelCase) if not isinstance(UpperCamelCase, np.ndarray) else flax_tensor
                lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase)
                # remove from missing keys
                missing_keys.remove(UpperCamelCase)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(UpperCamelCase)
    pt_model.load_state_dict(UpperCamelCase)
    # re-transform missing_keys to list
    lowerCAmelCase__ : Optional[int] = list(UpperCamelCase)
    if len(UpperCamelCase) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""")
    if len(UpperCamelCase) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference.""")
    return pt_model
37
1
"""Convert a HF Diffusers checkpoint layout to the original Stable Diffusion
(CompVis) ``state_dict`` layout for the UNet, VAE and text encoder.

NOTE(review): machine-obfuscated file — every module-level constant was
renamed to ``_lowerCAmelCase`` (each assignment shadows the previous one),
every function to ``_SCREAMING_SNAKE_CASE``, assignment targets to
``lowerCAmelCase__``, while later reads keep the original names
(``unet_conversion_map``, ``sd_down_res_prefix``, ...). As written this
script cannot execute; restore the original identifiers first. Code tokens
are preserved verbatim below; only comments/docstrings were added.
"""
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

# Direct (stable-diffusion key, HF Diffusers key) renames for the UNet.
_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
    ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
    ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
    ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
    ('''input_blocks.0.0.weight''', '''conv_in.weight'''),
    ('''input_blocks.0.0.bias''', '''conv_in.bias'''),
    ('''out.0.weight''', '''conv_norm_out.weight'''),
    ('''out.0.bias''', '''conv_norm_out.bias'''),
    ('''out.2.weight''', '''conv_out.weight'''),
    ('''out.2.bias''', '''conv_out.bias'''),
]

# Substring renames applied inside resnet blocks only.
_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''in_layers.0''', '''norm1'''),
    ('''in_layers.2''', '''conv1'''),
    ('''out_layers.0''', '''norm2'''),
    ('''out_layers.3''', '''conv2'''),
    ('''emb_layers.1''', '''time_emb_proj'''),
    ('''skip_connection''', '''conv_shortcut'''),
]

_lowerCAmelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        _lowerCAmelCase = F"""down_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            _lowerCAmelCase = F"""down_blocks.{i}.attentions.{j}."""
            _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        _lowerCAmelCase = F"""up_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            _lowerCAmelCase = F"""up_blocks.{i}.attentions.{j}."""
            _lowerCAmelCase = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.conv."""
        _lowerCAmelCase = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0."""
        _lowerCAmelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

_lowerCAmelCase = '''mid_block.attentions.0.'''
_lowerCAmelCase = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    _lowerCAmelCase = F"""mid_block.resnets.{j}."""
    _lowerCAmelCase = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def _SCREAMING_SNAKE_CASE(UpperCamelCase):
    """Rename all UNet state-dict keys from Diffusers to SD layout.

    Builds an identity key mapping, applies the direct renames, the
    resnet-internal substring renames, then the per-layer prefix renames,
    and finally re-keys the state dict. Values are untouched.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which they are declared.
    lowerCAmelCase__ : Any = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        lowerCAmelCase__ : Optional[int] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                lowerCAmelCase__ : Any = v.replace(UpperCamelCase, UpperCamelCase)
            lowerCAmelCase__ : List[Any] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            lowerCAmelCase__ : List[Any] = v.replace(UpperCamelCase, UpperCamelCase)
        lowerCAmelCase__ : Optional[Any] = v
    lowerCAmelCase__ : Tuple = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

# Substring renames for the VAE; note HF up_blocks are numbered in reverse.
_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''nin_shortcut''', '''conv_shortcut'''),
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        _lowerCAmelCase = F"""encoder.down_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0."""
        _lowerCAmelCase = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0."""
        _lowerCAmelCase = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        _lowerCAmelCase = F"""decoder.up_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    _lowerCAmelCase = F"""mid_block.resnets.{i}."""
    _lowerCAmelCase = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# Attention-layer substring renames applied only to keys containing "attentions".
_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]


def _SCREAMING_SNAKE_CASE(UpperCamelCase):
    """Append two singleton spatial dims: SD stores VAE attention projections
    as 1x1 convs, so (out, in) weights become (out, in, 1, 1)."""
    return w.reshape(*w.shape, 1, 1)


def _SCREAMING_SNAKE_CASE(UpperCamelCase):
    """Rename all VAE state-dict keys from Diffusers to SD layout and reshape
    the mid-block attention q/k/v/proj_out weights to 1x1-conv form."""
    lowerCAmelCase__ : Optional[int] = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            lowerCAmelCase__ : str = v.replace(UpperCamelCase, UpperCamelCase)
        lowerCAmelCase__ : Optional[Any] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                lowerCAmelCase__ : Dict = v.replace(UpperCamelCase, UpperCamelCase)
            lowerCAmelCase__ : List[Any] = v
    lowerCAmelCase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()}
    lowerCAmelCase__ : Tuple = ["""q""", """k""", """v""", """proj_out"""]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""")
                lowerCAmelCase__ : Optional[int] = reshape_weight_for_sd(UpperCamelCase)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

# (stable-diffusion key fragment, HF Diffusers key fragment) for the v2
# OpenCLIP text encoder.
_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
# Reverse lookup (HF fragment -> SD fragment) and one regex matching any of them.
_lowerCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_lowerCAmelCase = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_lowerCAmelCase = {'''q''': 0, '''k''': 1, '''v''': 2}


def _SCREAMING_SNAKE_CASE(UpperCamelCase):
    """Convert a v2 (OpenCLIP) text-encoder state dict to SD layout.

    Separate q/k/v projection weights and biases are captured per layer and
    concatenated into SD's fused ``in_proj`` tensors; all other keys are
    renamed via the regex map. Raises if any q/k/v triple is incomplete.
    """
    lowerCAmelCase__ : Optional[Any] = {}
    lowerCAmelCase__ : int = {}
    lowerCAmelCase__ : List[Any] = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(""".self_attn.q_proj.weight""")
            or k.endswith(""".self_attn.k_proj.weight""")
            or k.endswith(""".self_attn.v_proj.weight""")
        ):
            # Buffer the three projection weights under their shared prefix.
            lowerCAmelCase__ : Optional[int] = k[: -len(""".q_proj.weight""")]
            lowerCAmelCase__ : Tuple = k[-len("""q_proj.weight""")]
            if k_pre not in capture_qkv_weight:
                lowerCAmelCase__ : List[Any] = [None, None, None]
            lowerCAmelCase__ : Dict = v
            continue
        if (
            k.endswith(""".self_attn.q_proj.bias""")
            or k.endswith(""".self_attn.k_proj.bias""")
            or k.endswith(""".self_attn.v_proj.bias""")
        ):
            # Same buffering for the projection biases.
            lowerCAmelCase__ : str = k[: -len(""".q_proj.bias""")]
            lowerCAmelCase__ : List[str] = k[-len("""q_proj.bias""")]
            if k_pre not in capture_qkv_bias:
                lowerCAmelCase__ : Union[str, Any] = [None, None, None]
            lowerCAmelCase__ : Any = v
            continue
        # Non-attention key: plain fragment rename.
        lowerCAmelCase__ : Dict = textenc_pattern.sub(lambda UpperCamelCase: protected[re.escape(m.group(0))], UpperCamelCase)
        lowerCAmelCase__ : Union[str, Any] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""")
        lowerCAmelCase__ : Any = textenc_pattern.sub(lambda UpperCamelCase: protected[re.escape(m.group(0))], UpperCamelCase)
        lowerCAmelCase__ : Tuple = torch.cat(UpperCamelCase)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""")
        lowerCAmelCase__ : str = textenc_pattern.sub(lambda UpperCamelCase: protected[re.escape(m.group(0))], UpperCamelCase)
        lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase)
    return new_state_dict


def _SCREAMING_SNAKE_CASE(UpperCamelCase):
    """v1 (CLIP) text encoder needs no conversion — return the dict unchanged."""
    return text_enc_dict


if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )

    _lowerCAmelCase = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        _lowerCAmelCase = load_file(unet_path, device='''cpu''')
    else:
        _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        _lowerCAmelCase = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        _lowerCAmelCase = load_file(vae_path, device='''cpu''')
    else:
        _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        _lowerCAmelCase = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        _lowerCAmelCase = load_file(text_enc_path, device='''cpu''')
    else:
        _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        _lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    _lowerCAmelCase = convert_unet_state_dict(unet_state_dict)
    _lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    _lowerCAmelCase = convert_vae_state_dict(vae_state_dict)
    _lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    _lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        _lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        _lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
        _lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        _lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict)
        _lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    _lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        _lowerCAmelCase = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        _lowerCAmelCase = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
37
"""Tests for the PyTorch MaskFormer model."""
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class MaskFormerModelTester:
    """Builds a tiny MaskFormer config plus random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # random binary masks / labels are enough to exercise the loss
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

        comm_check_on_output(result)

        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    """Load the standard COCO cats fixture used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
37
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : list[list[int]] = [[0 for _ in range(UpperCamelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): lowerCAmelCase__ : Tuple = 1 for n in range(m + 1 ): for k in range(1 , UpperCamelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: _lowerCAmelCase = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: _lowerCAmelCase = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
37
"""FocalNet model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model.

    Stores the architecture hyper-parameters (image/patch geometry, embedding
    and per-stage sizes, focal-modulation settings, regularisation and
    initialisation constants) and derives the backbone stage names plus the
    aligned ``out_features`` / ``out_indices`` used when FocalNet serves as a
    backbone.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # one "stage" per entry in `depths`, preceded by the patch-embedding stem
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
37
1
"""Preprocess and filter a code dataset: hash-based exact dedup, heuristic
quality filters, optional minhash near-dedup, then sharded gzip export."""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field (whitespace-insensitive MD5)."""
    # NOTE: fixed `hashlib.mda` -> `hashlib.md5` (typo made the script crash).
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates alphanumeric fraction of file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if example hash is still unique; consume it from the set if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a config or test file: keyword scan of the first lines,
    then a frequency test on 'config'/'test' occurrences."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a file lacks basic Python constructs (def/class/for/while)."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the global tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics; config/test/keyword-less files are only
    filtered with probability `args.filter_proba`."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Gzip-compress a file in place and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
37
"""Convert a HF Diffusers checkpoint (UNet / VAE / text encoder) back to the
original Stable Diffusion key layout."""
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to the original SD key layout.

    Builds an old-key -> new-key mapping by applying the exact-name table,
    then the resnet sub-part table, then the per-layer prefix table.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))


vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    """Convert an HF linear weight to an SD conv weight by appending 1x1 dims."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to the original SD layout and reshape the
    mid-block attention q/k/v/proj_out weights from linear to 1x1 conv."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from
https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp _lowerCAmelCase = {'''q''': 0, '''k''': 1, '''v''': 2} def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = {} lowerCAmelCase__ : int = {} lowerCAmelCase__ : List[Any] = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): lowerCAmelCase__ : Optional[int] = k[: -len(""".q_proj.weight""" )] lowerCAmelCase__ : Tuple = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: lowerCAmelCase__ : List[Any] = [None, None, None] lowerCAmelCase__ : Dict = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): lowerCAmelCase__ : str = k[: -len(""".q_proj.bias""" )] lowerCAmelCase__ : List[str] = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: lowerCAmelCase__ : Union[str, Any] = [None, None, None] lowerCAmelCase__ : Any = v continue lowerCAmelCase__ : Dict = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCAmelCase__ : Any = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : Tuple = torch.cat(UpperCamelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCAmelCase__ : str = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase ) return new_state_dict def 
_SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return text_enc_dict if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.''' ) _lowerCAmelCase = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''') _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''') _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): _lowerCAmelCase = load_file(unet_path, device='''cpu''') else: _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''') _lowerCAmelCase = torch.load(unet_path, map_location='''cpu''') if osp.exists(vae_path): _lowerCAmelCase = load_file(vae_path, device='''cpu''') else: _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''') _lowerCAmelCase = torch.load(vae_path, map_location='''cpu''') if osp.exists(text_enc_path): _lowerCAmelCase = load_file(text_enc_path, device='''cpu''') else: _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''') _lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''') # Convert the UNet model _lowerCAmelCase = 
convert_unet_state_dict(unet_state_dict) _lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model _lowerCAmelCase = convert_vae_state_dict(vae_state_dict) _lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper _lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm _lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()} _lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict) _lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()} else: _lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict) _lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint _lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: _lowerCAmelCase = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: _lowerCAmelCase = {'''state_dict''': state_dict} torch.save(state_dict, args.checkpoint_path)
37
1
"""METEOR metric wrapper for the `datasets` library."""

import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version

# METEOR's tokenization behavior changed in NLTK 3.6.4+; detect at import time.
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev  and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine
               Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    """METEOR metric — wraps ``nltk.translate.meteor_score``."""

    def _info(self):
        # Metric metadata consumed by the `datasets` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Fetch the NLTK corpora METEOR depends on; newer NLTK needs extra data.
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # NLTK >= 3.6.5 expects pre-tokenized input for single_meteor_score.
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
37
"""BLEURT metric wrapper for the `datasets` library."""

import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training
phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT
out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

# Known BLEURT checkpoints, keyed by config name.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """BLEURT metric — downloads a checkpoint and scores candidates against references."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
37
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor _lowerCAmelCase = logging.get_logger(__name__) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" ,__UpperCAmelCase ,) super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
37
"""Conversational pipeline: holds a `Conversation` state object and a pipeline that extends it."""

import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """Utility class holding one conversation: past user inputs, generated
    responses, and the not-yet-processed new user input.
    """

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        # Same UUID means same conversation regardless of content.
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue `text` as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model response in the history."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs over the whole conversation in order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline built on a generative model."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        # Split user kwargs into the three pipeline phases.
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        # Unwrap single-conversation batches for convenience.
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            # Keep the most recent tokens so the model still sees the latest turn.
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        # Fallback tokenization: join all turns, each followed by EOS when available.
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
37
1
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable _lowerCAmelCase = { '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''], '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXJapaneseForCausalLM''', '''GPTNeoXJapaneseLayer''', '''GPTNeoXJapaneseModel''', '''GPTNeoXJapanesePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
37
"""Zero-shot image classification pipeline (CLIP-style image/text matching)."""

from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Score an image against arbitrary candidate text labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Only preprocessing takes user parameters here.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Highest score first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
37
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 4000000 ): """simple docstring""" lowerCAmelCase__ : Optional[int] = [] lowerCAmelCase__ , lowerCAmelCase__ : Tuple = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Dict = b, a + b return sum(UpperCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
37
# NOTE(review): SageMaker multi-node training integration test (PyTorch DDP /
# SMDistributed / TensorFlow variants via @parameterized_class). Skipped unless
# env var TEST_SAGEMAKER is literally "True". All three methods below are named
# `UpperCAmelCase_`, so in Python only the LAST definition survives on the
# class — the earlier setUp/create-estimator bodies are shadowed, and names
# like `instance_count`, `distribution`, `job_name`, `result_metrics_df`,
# `train_runtime` are unresolved (obfuscation residue). Left byte-identical;
# TODO confirm intended method names against upstream before any restyle.
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="""utf-8""" ,check=__UpperCAmelCase ,) assert hasattr(self ,"""env""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}""" # distributed data settings lowerCAmelCase__ : Any = 
{"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=__UpperCAmelCase ,instance_count=__UpperCAmelCase ,instance_type=self.instance_type ,debugger_hook_config=__UpperCAmelCase ,hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=__UpperCAmelCase ,py_version="""py36""" ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: # create estimator lowerCAmelCase__ : List[Any] = self.create_estimator(__UpperCAmelCase ) # run training estimator.fit() # result dataframe lowerCAmelCase__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ : int = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ : List[str] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" ,"""w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": 
eval_accuracy, """eval_loss""": eval_loss} ,__UpperCAmelCase )
37
1
# NOTE(review): repository lint test — scans ./datasets/**/*.py for (a) open()
# calls missing an explicit encoding/binary mode and (b) print() statements
# outside comments/docstrings (regexes visible below). All four methods share
# the name `UpperCAmelCase_`, so only the last definition survives on the
# class, and `self._no_encoding_on_file_open` / `self._no_print_statements`
# are unresolved — obfuscation residue; TODO confirm intended method names
# against upstream. Code left byte-identical.
'''simple docstring''' import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: with open(__UpperCAmelCase ,encoding="""utf-8""" ) as input_file: lowerCAmelCase__ : List[str] = re.compile(R"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" ) lowerCAmelCase__ : Any = input_file.read() lowerCAmelCase__ : Any = regexp.search(__UpperCAmelCase ) return match def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]: with open(__UpperCAmelCase ,encoding="""utf-8""" ) as input_file: lowerCAmelCase__ : str = re.compile(R"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" ,re.DOTALL ) lowerCAmelCase__ : Optional[int] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` lowerCAmelCase__ : Any = regexp.finditer(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ : str = Path("""./datasets""" ) lowerCAmelCase__ : Dict = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__UpperCAmelCase ) ): raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Union[str, Any] = Path("""./datasets""" ) lowerCAmelCase__ : Optional[int] = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_print_statements(str(__UpperCAmelCase ) ): raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
37
# NOTE(review): ALBERT SentencePiece tokenizer (slow tokenizer). Covers
# preprocessing (quote normalization, optional NFKD accent stripping and
# lower-casing), SentencePiece encode/decode with the digit-comma special
# case, [CLS]/[SEP] special-token assembly, token-type ids, and vocab
# serialization via sp_model.serialized_model_proto(). Obfuscation broke many
# intra-method references (`vocab`, `state`, `inputs`/`outputs`, `pieces`,
# `new_pieces`, `cur_pieces`, `out_string`, `prev_is_special`,
# `current_sub_tokens`, `save_directory`, `out_vocab_file`) — assignment
# targets were all renamed to `lowerCAmelCase__` while uses kept the original
# names. Also note a string literal is split across the next chunk line
# (`""" ... """.join`). Left byte-identical; restyling is unsafe until names
# are reconciled with upstream.
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } _lowerCAmelCase = '''▁''' class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Any = VOCAB_FILES_NAMES __lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="[SEP]" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="[CLS]" ,__UpperCAmelCase="[MASK]" ,__UpperCAmelCase = None 
,**__UpperCAmelCase ,) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowerCAmelCase__ : Tuple = ( AddedToken(__UpperCAmelCase ,lstrip=__UpperCAmelCase ,rstrip=__UpperCAmelCase ,normalized=__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else mask_token ) lowerCAmelCase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : str = do_lower_case lowerCAmelCase__ : int = remove_space lowerCAmelCase__ : Tuple = keep_accents lowerCAmelCase__ : Any = vocab_file lowerCAmelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def UpperCAmelCase_ ( self ) -> Optional[int]: return len(self.sp_model ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Any: lowerCAmelCase__ : Optional[Any] = self.__dict__.copy() lowerCAmelCase__ : Optional[Any] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Union[str, Any] = {} lowerCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]: if self.remove_space: lowerCAmelCase__ : int = """ 
""".join(inputs.strip().split() ) else: lowerCAmelCase__ : str = inputs lowerCAmelCase__ : Tuple = outputs.replace("""``""" ,"""\"""" ).replace("""''""" ,"""\"""" ) if not self.keep_accents: lowerCAmelCase__ : Any = unicodedata.normalize("""NFKD""" ,__UpperCAmelCase ) lowerCAmelCase__ : Dict = """""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: lowerCAmelCase__ : Tuple = outputs.lower() return outputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) lowerCAmelCase__ : str = [] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): lowerCAmelCase__ : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase ,"""""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowerCAmelCase__ : str = cur_pieces[1:] else: lowerCAmelCase__ : int = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: return self.sp_model.IdToPiece(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : str = [] lowerCAmelCase__ : Tuple = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : List[str] = [] else: current_sub_tokens.append(__UpperCAmelCase ) 
lowerCAmelCase__ : Optional[Any] = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]: lowerCAmelCase__ : int = [self.sep_token_id] lowerCAmelCase__ : Dict = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]: lowerCAmelCase__ : List[str] = [self.sep_token_id] lowerCAmelCase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : int = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
37
1
# NOTE(review): XGBoost-on-iris demo: load the iris dataset, split
# train/test (test_size=0.25), fit an XGBClassifier, and show a normalized
# confusion matrix. All three functions are named `_SCREAMING_SNAKE_CASE`,
# so only the last (main) survives at module level, and internal uses of
# `classifier`, `iris`, `features`, `targets`, `x_train`, `names`,
# `xgboost_classifier` were not renamed alongside their (obfuscated)
# assignments — the script cannot run as-is. Requires sklearn/xgboost/
# matplotlib. Left byte-identical pending reconciliation with upstream.
'''simple docstring''' import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return (data["data"], data["target"]) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = XGBClassifier() classifier.fit(UpperCamelCase , UpperCamelCase ) return classifier def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : str = load_iris() lowerCAmelCase__ , lowerCAmelCase__ : Tuple = data_handling(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = train_test_split( UpperCamelCase , UpperCamelCase , test_size=0.25 ) lowerCAmelCase__ : int = iris["""target_names"""] # Create an XGBoost Classifier from the training data lowerCAmelCase__ : List[Any] = xgboost(UpperCamelCase , UpperCamelCase ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( UpperCamelCase , UpperCamelCase , UpperCamelCase , display_labels=UpperCamelCase , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
37
# NOTE(review): CTRL tokenizer — classic BPE over a vocab.json/merges.txt
# pair, with "@@ " continuation markers and the CONTROL_CODES table of
# conditioning prefixes. The `get_pairs` helper and the BPE merge loop are
# order-sensitive; obfuscation broke intra-function references (`pairs`,
# `prev_char`, `word`, `new_word`, `first`/`second`, `i`/`j`, `split_tokens`,
# `words`, `out_string`, `merge_file`, `index`) — assignment targets were
# renamed while uses were not, so this block is not runnable as-is. Left
# byte-identical; TODO reconcile names with upstream before restyling.
'''simple docstring''' import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', } _lowerCAmelCase = { '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''}, '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''}, } _lowerCAmelCase = { '''ctrl''': 256, } _lowerCAmelCase = { '''Pregnancy''': 16_8629, '''Christianity''': 7675, '''Explain''': 10_6423, '''Fitness''': 6_3440, '''Saving''': 6_3163, '''Ask''': 2_7171, '''Ass''': 9_5985, '''Joke''': 16_3509, '''Questions''': 4_5622, '''Thoughts''': 4_9605, '''Retail''': 5_2342, '''Feminism''': 16_4338, '''Writing''': 1_1992, '''Atheism''': 19_2263, '''Netflix''': 4_8616, '''Computing''': 3_9639, '''Opinion''': 4_3213, '''Alone''': 4_4967, '''Funny''': 5_8917, '''Gaming''': 4_0358, '''Human''': 4088, '''India''': 1331, '''Joker''': 7_7138, '''Diet''': 3_6206, '''Legal''': 1_1859, '''Norman''': 4939, '''Tip''': 7_2689, '''Weight''': 5_2343, '''Movies''': 4_6273, '''Running''': 2_3425, '''Science''': 2090, '''Horror''': 3_7793, '''Confession''': 6_0572, '''Finance''': 1_2250, '''Politics''': 1_6360, '''Scary''': 19_1985, '''Support''': 1_2654, '''Technologies''': 3_2516, '''Teenage''': 6_6160, '''Event''': 3_2769, '''Learned''': 6_7460, '''Notion''': 18_2770, '''Wikipedia''': 3_7583, '''Books''': 6665, '''Extract''': 7_6050, '''Confessions''': 10_2701, '''Conspiracy''': 7_5932, '''Links''': 6_3674, '''Narcissus''': 15_0425, '''Relationship''': 5_4766, '''Relationships''': 13_4796, '''Reviews''': 4_1671, '''News''': 4256, '''Translation''': 2_6820, '''multilingual''': 12_8406, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] 
= set() lowerCAmelCase__ : str = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : List[Any] = char lowerCAmelCase__ : Optional[Any] = set(UpperCamelCase ) return pairs class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP __lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Any = CONTROL_CODES def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<unk>" ,**__UpperCAmelCase ) -> Optional[int]: super().__init__(unk_token=__UpperCAmelCase ,**__UpperCAmelCase ) with open(__UpperCAmelCase ,encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase__ : int = json.load(__UpperCAmelCase ) lowerCAmelCase__ : Any = {v: k for k, v in self.encoder.items()} with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle: lowerCAmelCase__ : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1] lowerCAmelCase__ : Optional[Any] = [tuple(merge.split() ) for merge in merges] lowerCAmelCase__ : int = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase__ : List[Any] = {} @property def UpperCAmelCase_ ( self ) -> Optional[Any]: return len(self.encoder ) def UpperCAmelCase_ ( self ) -> Optional[Any]: return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: if token in self.cache: return self.cache[token] lowerCAmelCase__ : Optional[Any] = tuple(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowerCAmelCase__ : Any = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: lowerCAmelCase__ : List[str] = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = bigram lowerCAmelCase__ : List[str] = [] 
lowerCAmelCase__ : Dict = 0 while i < len(__UpperCAmelCase ): try: lowerCAmelCase__ : Optional[Any] = word.index(__UpperCAmelCase ,__UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : Dict = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : Tuple = tuple(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = new_word if len(__UpperCAmelCase ) == 1: break else: lowerCAmelCase__ : Dict = get_pairs(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = """@@ """.join(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = word[:-4] lowerCAmelCase__ : Optional[Any] = word return word def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Optional[int] = [] lowerCAmelCase__ : Any = re.findall(R"""\S+\n?""" ,__UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]: return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: return self.decoder.get(__UpperCAmelCase ,self.unk_token ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip() return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : List[Any] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + 
VOCAB_FILES_NAMES["""merges_file"""] ) with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__UpperCAmelCase ,ensure_ascii=__UpperCAmelCase ) + """\n""" ) lowerCAmelCase__ : int = 0 with open(__UpperCAmelCase ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase__ : Dict = token_index writer.write(""" """.join(__UpperCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
37
1
# NOTE(review): KDPM2-style discrete diffusion scheduler (two-stage / 2nd
# order sampler): betas_for_alpha_bar builds a cosine or exp beta schedule;
# the class builds sigmas from cumulative alphas, log-lerps an interpolated
# sigma grid, tracks per-timestep step counts in `_index_counter`, and `step`
# alternates between a first-order Euler-style move and the 2nd-order
# correction (state_in_first_order toggled via self.sample). Heavy torch
# dependence; obfuscation left many names unresolved (`betas`, `timesteps`,
# `indices`/`pos`, `sigmas`, `low_idx`/`high_idx`, `gamma`, `sigma_hat`,
# `pred_original_sample`, `derivative`, `dt`, `step_indices`, `sigma`), and
# several f-strings/comments are split across chunk lines — statement order
# here is load-bearing, so the code is left byte-identical.
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=0.999 , UpperCamelCase="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCamelCase ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCamelCase ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowerCAmelCase__ : Optional[Any] = [] for i in range(UpperCamelCase ): lowerCAmelCase__ : Union[str, Any] = i / num_diffusion_timesteps lowerCAmelCase__ : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCamelCase ) / alpha_bar_fn(UpperCamelCase ) , UpperCamelCase ) ) return torch.tensor(UpperCamelCase , dtype=torch.floataa ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[Any] = [e.name for e in KarrasDiffusionSchedulers] __lowercase : str = 2 @register_to_config def __init__( self ,__UpperCAmelCase = 1000 ,__UpperCAmelCase = 0.0_0_0_8_5 ,__UpperCAmelCase = 0.0_1_2 ,__UpperCAmelCase = "linear" ,__UpperCAmelCase = None ,__UpperCAmelCase = "epsilon" ,__UpperCAmelCase = "linspace" ,__UpperCAmelCase = 0 ,) -> List[Any]: if trained_betas is not None: lowerCAmelCase__ : Union[str, Any] = torch.tensor(__UpperCAmelCase ,dtype=torch.floataa ) elif beta_schedule == "linear": lowerCAmelCase__ : List[str] = torch.linspace(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
lowerCAmelCase__ : Union[str, Any] = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,__UpperCAmelCase ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowerCAmelCase__ : Any = betas_for_alpha_bar(__UpperCAmelCase ) else: raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" ) lowerCAmelCase__ : List[Any] = 1.0 - self.betas lowerCAmelCase__ : Optional[Any] = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> Tuple: if schedule_timesteps is None: lowerCAmelCase__ : int = self.timesteps lowerCAmelCase__ : str = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: lowerCAmelCase__ : Dict = 1 if len(__UpperCAmelCase ) > 1 else 0 else: lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep lowerCAmelCase__ : Optional[Any] = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCAmelCase_ ( self ) -> str: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,) -> torch.FloatTensor: lowerCAmelCase__ : int = self.index_for_timestep(__UpperCAmelCase ) if self.state_in_first_order: lowerCAmelCase__ : Tuple = self.sigmas[step_index] else: lowerCAmelCase__ : Any = self.sigmas_interpol[step_index] lowerCAmelCase__ : List[Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> int: lowerCAmelCase__ : Tuple = num_inference_steps lowerCAmelCase__ : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowerCAmelCase__ : Dict = np.linspace(0 ,num_train_timesteps - 1 ,__UpperCAmelCase ,dtype=__UpperCAmelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": lowerCAmelCase__ : int = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowerCAmelCase__ : Dict = (np.arange(0 ,__UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(__UpperCAmelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowerCAmelCase__ : str = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowerCAmelCase__ : Tuple = (np.arange(__UpperCAmelCase ,0 ,-step_ratio )).round().copy().astype(__UpperCAmelCase ) timesteps -= 1 else: raise ValueError( F"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) lowerCAmelCase__ : Optional[int] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowerCAmelCase__ : List[str] = torch.from_numpy(np.log(__UpperCAmelCase ) ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = np.interp(__UpperCAmelCase ,np.arange(0 ,len(__UpperCAmelCase ) ) ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowerCAmelCase__ : Optional[Any] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ) # interpolate sigmas lowerCAmelCase__ : Dict = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() lowerCAmelCase__ : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) lowerCAmelCase__ : List[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(__UpperCAmelCase ).startswith("""mps""" ): # mps does not support float64 lowerCAmelCase__ : List[str] = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase ,dtype=torch.floataa ) else: lowerCAmelCase__ : Optional[Any] = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase ) # interpolate timesteps lowerCAmelCase__ : Tuple = self.sigma_to_t(__UpperCAmelCase ).to(__UpperCAmelCase ,dtype=timesteps.dtype ) lowerCAmelCase__ : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() lowerCAmelCase__ : Tuple = torch.cat([timesteps[:1], interleaved_timesteps] ) lowerCAmelCase__ : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowerCAmelCase__ : Any = defaultdict(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: # get log sigma lowerCAmelCase__ : str = sigma.log() # get distribution lowerCAmelCase__ : Union[str, Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range lowerCAmelCase__ : Optional[int] = dists.ge(0 ).cumsum(dim=0 
).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) lowerCAmelCase__ : List[str] = low_idx + 1 lowerCAmelCase__ : Any = self.log_sigmas[low_idx] lowerCAmelCase__ : Optional[Any] = self.log_sigmas[high_idx] # interpolate sigmas lowerCAmelCase__ : List[Any] = (low - log_sigma) / (low - high) lowerCAmelCase__ : Union[str, Any] = w.clamp(0 ,1 ) # transform interpolation to time range lowerCAmelCase__ : Tuple = (1 - w) * low_idx + w * high_idx lowerCAmelCase__ : Union[str, Any] = t.view(sigma.shape ) return t @property def UpperCAmelCase_ ( self ) -> Tuple: return self.sample is None def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = True ,) -> Union[SchedulerOutput, Tuple]: lowerCAmelCase__ : Optional[Any] = self.index_for_timestep(__UpperCAmelCase ) # advance index counter by 1 lowerCAmelCase__ : Tuple = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowerCAmelCase__ : int = self.sigmas[step_index] lowerCAmelCase__ : Optional[int] = self.sigmas_interpol[step_index + 1] lowerCAmelCase__ : List[str] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowerCAmelCase__ : Optional[int] = self.sigmas[step_index - 1] lowerCAmelCase__ : Dict = self.sigmas_interpol[step_index] lowerCAmelCase__ : List[Any] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowerCAmelCase__ : Dict = sigma_hat if self.state_in_first_order else sigma_interpol lowerCAmelCase__ : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowerCAmelCase__ : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_interpol lowerCAmelCase__ : Dict = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowerCAmelCase__ : Optional[int] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowerCAmelCase__ : Dict = sigma_interpol - sigma_hat # store for 2nd order step lowerCAmelCase__ : Optional[int] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowerCAmelCase__ : Any = (sample - pred_original_sample) / sigma_interpol # 3. 
delta timestep lowerCAmelCase__ : Optional[Any] = sigma_next - sigma_hat lowerCAmelCase__ : Tuple = self.sample lowerCAmelCase__ : int = None lowerCAmelCase__ : List[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples lowerCAmelCase__ : List[Any] = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__UpperCAmelCase ): # mps does not support float64 lowerCAmelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) lowerCAmelCase__ : Optional[Any] = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: lowerCAmelCase__ : List[str] = self.timesteps.to(original_samples.device ) lowerCAmelCase__ : Union[str, Any] = timesteps.to(original_samples.device ) lowerCAmelCase__ : List[Any] = [self.index_for_timestep(__UpperCAmelCase ,__UpperCAmelCase ) for t in timesteps] lowerCAmelCase__ : List[str] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowerCAmelCase__ : Dict = sigma.unsqueeze(-1 ) lowerCAmelCase__ : str = original_samples + noise * sigma return noisy_samples def __len__( self ) -> int: return self.config.num_train_timesteps
37
"""Check whether a sentence is a pangram (contains every letter a-z).

wiki: https://en.wikipedia.org/wiki/Pangram
"""


def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` contains every letter of the alphabet.

    >>> is_pangram()
    True
    >>> is_pangram("hello world")
    False
    """
    # Collect the distinct a-z letters seen; a pangram has all 26.
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Flag-array variant of :func:`is_pangram`.

    >>> is_pangram_faster()
    True
    >>> is_pangram_faster("abc")
    False
    """
    # One slot per letter; mark the slot for every ASCII letter encountered.
    flag = [False] * 26
    for char in input_str:
        if "a" <= char <= "z":
            flag[ord(char) - ord("a")] = True
        elif "A" <= char <= "Z":
            flag[ord(char) - ord("A")] = True
    # Pangram iff every one of the 26 slots was hit.
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Set-comprehension variant of :func:`is_pangram`.

    >>> is_pangram_fastest()
    True
    >>> is_pangram_fastest("")
    False
    """
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against the default sentence."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
"""Divide-and-conquer maximum-subarray (CLRS section 4.1) with a runtime benchmark."""
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint


def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Return (start, end, sum) of a maximum contiguous subarray of arr[low..high].

    >>> max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)[2]
    6
    """
    if not arr:
        return None, None, 0
    if low == high:  # single element is its own best subarray
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    # The best subarray lies entirely left, entirely right, or crosses mid.
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Return the best subarray forced to span the boundary between mid and mid+1."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    # Grow leftwards from mid, keeping the best suffix ending at mid.
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    # Grow rightwards from mid+1 symmetrically.
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    """Run max_subarray on a random array of ``input_size`` ints; return elapsed seconds."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def benchmark() -> None:
    """Plot runtime against input size (requires matplotlib)."""
    # Imported here rather than at module level so the algorithm itself can be
    # used in environments without matplotlib installed.
    from matplotlib import pyplot as plt

    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
37
"""Three ways to compute the digit sum of an integer, with a micro-benchmark."""


def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of abs(n), iteratively.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of :func:`sum_of_digits`.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    # Base case: a single digit is its own digit sum.
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """String-based variant of :func:`sum_of_digits`.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on values of growing magnitude."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit re-imports __main__, so the call is spelled out as a string.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
"""Convert a fairseq BioGPT checkpoint into the Hugging Face Transformers format."""
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

# Indentation used for every JSON file this script writes.
json_indent = 2


class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integer ids."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # id -> symbol
        self.count = []  # id -> occurrence count
        self.indices = {}  # symbol -> id
        # Special symbols always occupy the first ids, in this order.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids map to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load a dictionary from a text file of ``<symbol> <count>`` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add ``word`` n times to the dictionary and return its id."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # Plain fairseq dictionaries have no metadata header lines to skip.
        return 0

    def add_from_file(self, f):
        """Load a pre-existing dictionary from ``f`` (a file path or file object)."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                # Each row is "<token> <count>" with an optional trailing flag.
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    """Rewrite BPE vocab keys: strip word-internal '@@' markers, mark word ends with '</w>'."""
    # e.g. "ent@@" -> "ent", "touch" -> "touch</w>"
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert the fairseq checkpoint under ``biogpt_checkpoint_path`` into HF format.

    Writes vocab, merges, model config, tokenizer config and the weights file
    into ``pytorch_dump_folder_path``. Raises ValueError when an expected input
    file is missing.
    """
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # Rename fairseq prefixes to the HF module names.
    # NOTE(review): the exact replacement strings were destroyed by the
    # obfuscation; this mapping follows the upstream BioGPT conversion script.
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
37
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# Silence TensorFlow's C++ log spam before `tensorflow` is imported below.
# NOTE(review): reconstructed from the orphaned "3" constant and the otherwise
# unused `os` import — matches the upstream print_env script.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
37
1
'''simple docstring''' _lowerCAmelCase = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, 
KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, 
StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, 
StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
37
"""XLM-RoBERTa-XL model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration for an XLM-RoBERTa-XL model.

    Holds the hyper-parameters that define the model architecture; defaults
    correspond to the facebook/xlm-roberta-xl checkpoint.
    """

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa-XL."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
37
1
"""Convert a decimal value (or numeric string) to a fraction in lowest terms."""
from __future__ import annotations


def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return ``(numerator, denominator)`` of ``decimal`` reduced to lowest terms.

    Raises ValueError if ``decimal`` cannot be parsed as a number.

    >>> decimal_to_fraction(1.5)
    (3, 2)
    >>> decimal_to_fraction("6.25")
    (25, 4)
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        # Scale by a power of ten so the value becomes an integer ratio.
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm for the greatest common divisor.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
37
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[str] = analyze_text(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase__ : List[Any] = sum(single_char_strings.values() ) # one length string lowerCAmelCase__ : Optional[int] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase__ : List[Any] = single_char_strings[ch] lowerCAmelCase__ : List[Any] = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase__ : Dict = sum(two_char_strings.values() ) lowerCAmelCase__ : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase__ : Union[str, Any] = cha + cha if sequence in two_char_strings: lowerCAmelCase__ : Dict = two_char_strings[sequence] lowerCAmelCase__ : Tuple = int(UpperCamelCase ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = Counter() # type: ignore lowerCAmelCase__ : Tuple = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. 
two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
37
1
"""Tests for the ONNX Stable Diffusion x4 upscaling pipeline.

Fix: the obfuscated source defined both test classes under the same name and
every test method under one shared name, so only the last definition survived
and unittest never discovered the remaining tests. Classes and methods are
restored to unique, discoverable ``test_*`` names.
"""
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionUpscalePipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests: run the upscaler with each supported scheduler and
    compare a corner slice of the output against recorded reference values."""

    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Build a deterministic set of pipeline inputs for a given seed."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against the published checkpoint."""

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA provider with a capped arena so the test fits on CI GPUs.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
37
"""Convert an original latent-diffusion (CompVis LDM) UNet checkpoint into the
diffusers ``UNet2DModel`` state-dict layout.

Fix: the obfuscated source defined all five functions under the single name
``_SCREAMING_SNAKE_CASE`` while the bodies call ``shave_segments``,
``renew_resnet_paths`` etc., so the script raised ``NameError`` at runtime;
the real names (recovered from the call sites) are restored. The ``__main__``
block's variables are likewise restored, and the heavy torch/diffusers imports
are deferred into the CLI guard so the pure key-mapping helpers can be
imported without those packages installed.
"""
import argparse
import json


def shave_segments(path, n_shave_prefix_segments=1):
    """Drop dot-separated segments from *path*.

    Positive counts shave from the front, negative counts from the back.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM resnet parameter names to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM attention parameter names to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from *old_checkpoint* into *checkpoint* following *paths*.

    ``attention_paths_to_split`` maps a fused qkv key to the three target
    query/key/value keys; those tensors are split before assignment.
    ``additional_replacements`` are extra old->new substring substitutions
    applied to every destination path.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert a full LDM UNet *checkpoint* dict to the diffusers layout.

    *config* must provide ``num_res_blocks`` and ``num_head_channels``.
    Returns the converted state dict.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            # a downsampler block, not a resnet
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    # Heavy dependencies are imported here so the pure key-mapping helpers
    # above stay importable without torch/diffusers installed.
    import torch

    from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except Exception:  # narrowed from bare except: best-effort pipeline export, fall back to the bare model
        model.save_pretrained(args.dump_path)
37
1
'''simple docstring''' import math class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase=0 ) -> int: # a graph with Node 0,1,...,N-1 lowerCAmelCase__ : Union[str, Any] = n lowerCAmelCase__ : Optional[Any] = [ [math.inf for j in range(0 ,__UpperCAmelCase )] for i in range(0 ,__UpperCAmelCase ) ] # adjacency matrix for weight lowerCAmelCase__ : Union[str, Any] = [ [math.inf for j in range(0 ,__UpperCAmelCase )] for i in range(0 ,__UpperCAmelCase ) ] # dp[i][j] stores minimum distance from i to j def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Optional[int] = w def UpperCAmelCase_ ( self ) -> int: for k in range(0 ,self.n ): for i in range(0 ,self.n ): for j in range(0 ,self.n ): lowerCAmelCase__ : Optional[Any] = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]: return self.dp[u][v] if __name__ == "__main__": _lowerCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
37
"""Prime-number and elementary number-theory utilities.

Fixes: the obfuscated source defined every function under the single name
``_SCREAMING_SNAKE_CASE`` while the bodies call ``is_prime``,
``prime_factorization``, ``gcd`` etc., so the module raised ``NameError``;
the real names (recovered from the internal call sites) are restored.
``prime_factorization`` now uses exact integer division instead of float
division, avoiding precision loss on large inputs.
"""
from math import sqrt


def is_prime(number):
    """Return True iff *number* (a non-negative int) is prime (trial division)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Return all primes from 2 up to *n* via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # numbers contains all natural numbers from 2 up to N
    numbers = list(range(2, n + 1))

    # actual sieve of erathostenes: zero out multiples of each survivor
    for i in range(len(numbers)):
        for j in range(i + 1, len(numbers)):
            if (numbers[i] != 0) and (numbers[j] % numbers[i] == 0):
                numbers[j] = 0

    # filters actual prime numbers.
    ans = [x for x in numbers if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Return all primes from 2 up to *n* by testing each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Return the prime factorization of *number* as a list (with multiplicity)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # exact integer division keeps everything an int
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Return the largest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number):
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number):
    """Return two primes summing to the even *number* > 2 (Goldbach pair)."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Return the greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 0) and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Return the least common multiple of two positive ints."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 1) and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """Return all primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Return all positive divisors of *n* >= 1 (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Return True iff *number* equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduce numerator/denominator and return the pair as a tuple."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """Return the (n+1)-th Fibonacci number: fib(0) == 1, fib(1) == 1, fib(2) == 2, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
37
1
# NOTE(review): flattened manim Scene — a single `construct`-style method
# (`UpperCAmelCase_`) squashed onto three physical lines, with a `for`
# statement split across the first two. It draws a CPU / GPU / Model /
# Checkpoint / Disk diagram out of small rectangles and animates checkpoint
# cells moving onto disk slots (this looks like the `accelerate`
# big-model-inference visualisation — confirm against upstream before
# relying on that).
# L448: module docstring, `from manim import *`, class header, and the first
# part of the method: builds the three rectangle prototypes (memory cell,
# meta-cell, fill cell), the CPU group (two 6-cell columns plus a "CPU"
# label), the GPU group (4 cells), the Model group (6 cells), and begins
# iterating the model cells to place per-cell fill targets.
'''simple docstring''' from manim import * class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Dict = Rectangle(height=0.5 ,width=0.5 ) lowerCAmelCase__ : Any = Rectangle(height=0.2_5 ,width=0.2_5 ) lowerCAmelCase__ : List[str] = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 ) lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[str] = [mem.copy() for i in range(6 )] lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : List[Any] = VGroup(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Optional[Any] = Text("""CPU""" ,font_size=24 ) lowerCAmelCase__ : str = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = [mem.copy() for i in range(4 )] lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Optional[int] = Text("""GPU""" ,font_size=24 ) lowerCAmelCase__ : Union[str, Any] = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[str] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Optional[Any] = Text("""Model""" ,font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Any = [] lowerCAmelCase__ : Any = [] lowerCAmelCase__ : List[Any] = [] for i, rect in
# L449: finishes placing the per-model-cell fill targets (special-casing
# cells 0 and 3 for positioning), builds the "Loaded Checkpoint" group and
# its per-cell fill targets mapped onto the two CPU columns (first 5 cells
# to the left column, the rest to the right), then draws the legend key
# square and the "Empty Model" / "Checkpoint" markup bullets.
enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) lowerCAmelCase__ : str = Rectangle(height=0.4_6 / 4 ,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase ,opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] ,direction=__UpperCAmelCase ,buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] ,direction=__UpperCAmelCase ,buff=0.0 ) self.add(__UpperCAmelCase ) model_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase ,*__UpperCAmelCase ,*__UpperCAmelCase ) lowerCAmelCase__ : Dict = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Any = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : int = Text("""Loaded Checkpoint""" ,font_size=24 ) lowerCAmelCase__ : str = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : List[str] = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = fill.copy().set_fill(__UpperCAmelCase ,opacity=0.7 ) target.move_to(__UpperCAmelCase ) ckpt_arr.append(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase ,*__UpperCAmelCase ) lowerCAmelCase__ : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ : Any = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : int = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
# L450: positions the "Checkpoint" legend line, writes the explanatory step
# text, builds the "Disk" group from meta-cells (two 6-cell columns),
# animates each checkpoint cell shrinking (scale 0.5) onto a disk slot via
# MoveToTarget, fades the step text, shows the garbage-collection message,
# fades everything out, and waits.
blue_text.next_to(__UpperCAmelCase ,DOWN * 2.4 ,aligned_edge=key_text.get_left() ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = MarkupText( F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" ,font_size=24 ,) step_a.move_to([2, 2, 0] ) lowerCAmelCase__ : List[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : str = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Union[str, Any] = VGroup(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0 ) lowerCAmelCase__ : Tuple = Text("""Disk""" ,font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase ,__UpperCAmelCase ).arrange(__UpperCAmelCase ,buff=0.5 ,aligned_edge=__UpperCAmelCase ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(__UpperCAmelCase ,run_time=3 ) ,Write(__UpperCAmelCase ,run_time=1 ) ,Create(__UpperCAmelCase ,run_time=1 ) ) lowerCAmelCase__ : List[Any] = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__UpperCAmelCase ,run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(FadeOut(__UpperCAmelCase ) ) lowerCAmelCase__ : Tuple = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" ,font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase ,run_time=3 ) ) self.play( FadeOut(__UpperCAmelCase ,__UpperCAmelCase ,*__UpperCAmelCase ,*__UpperCAmelCase ) ,) self.wait()
37
# NOTE(review): flattened SuperGLUE metric module (HF `datasets` metric
# style). A triple-quoted usage docstring spans the next three physical
# lines, so statement boundaries here are irrecoverable; documented as-is.
# L452: module docstring, imports (sklearn f1_score / matthews_corrcoef,
# datasets, the local record evaluator), the BibTeX citation string, the
# benchmark description string, and the start of the kwargs/usage docstring
# (per-subset prediction formats).
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _lowerCAmelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _lowerCAmelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _lowerCAmelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels.
# L453: continuation of the usage docstring: per-subset reference formats,
# per-subset return values (exact_match/f1, f1_m/f1_a, matthews_correlation,
# accuracy), and doctest examples for copa/cb/record/multirc.
Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>>
# L454: end of the usage docstring (axb example), then:
#  * simple_accuracy(preds, labels): mean of elementwise preds == labels —
#    assumes array-like inputs with a `.mean()` on the comparison (numpy);
#  * acc_and_fa(preds, labels, fa_avg="binary"): accuracy plus sklearn
#    f1_score with the given averaging, returned as a dict;
#  * start of the MultiRC evaluator: buckets (prediction, label) pairs into
#    a map keyed by "<paragraph>-<question>" id, then per question computes
#    macro F1 and an exact-match flag (all answers for the question correct).
references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" return float((preds == labels).mean() ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ): """simple docstring""" lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = {} for id_pred, label in zip(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" lowerCAmelCase__ : Dict = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCAmelCase__ : Optional[int] = [(pred, label)] lowerCAmelCase__ , lowerCAmelCase__ : int = [], [] for question, preds_labels in question_map.items(): lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase ) lowerCAmelCase__ : List[Any] = fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" ) fas.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) ) ems.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) ) lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase )
# L455: finishes the MultiRC evaluator — overall per-answer F1 (f1_a) via
# f1_score on the flat label/prediction lists; returns
# {exact_match, f1_m, f1_a} — then the datasets.Metric subclass:
#  * config-name validation (raises KeyError listing the valid subsets) and
#    MetricInfo construction (numpy format except for record/multirc);
#  * per-config feature schemas: record (idx + prediction_text / answers),
#    multirc (idx + int prediction / int reference), else plain int64 labels;
#  * start of the compute method: axb branch → matthews_corrcoef.
lowerCAmelCase__ : Dict = float(fa_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,) def UpperCAmelCase_ ( self ) -> str: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: if self.config_name == "axb": return {"matthews_correlation":
# L456: remainder of the compute dispatch: cb → accuracy + macro F1;
# record → reshape references/predictions into the evaluate_record input
# shape (qas list keyed by query idx) and delegate; multirc →
# evaluate_multirc; the simple-accuracy subsets (copa/rte/wic/wsc/
# wsc.fixed/boolq/axg); otherwise raise KeyError listing valid configs.
matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" ) elif self.config_name == "record": lowerCAmelCase__ : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
37
1
# NOTE(review): flattened MaskFormer test module. Covered here: the module
# header and the MaskFormerModelTester helper class, which ends on the third
# physical line below; that line's tail is the decorator + `class` keyword of
# the next (test-case) class, whose body continues beyond this chunk.
# L459: module docstring and imports (torch-/vision-gated transformers test
# utilities), then the tester class: constructor storing test
# hyper-parameters (batch size, training flags, query count, channel count,
# image min/max size, label count, mask feature size) and the start of
# prepare_config_and_inputs — random pixel_values, an all-ones pixel_mask,
# random binary float mask_labels, and the beginning of class_labels.
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str: lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Dict = use_auxiliary_loss lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : List[str] = min_size lowerCAmelCase__ : int = max_size lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = mask_feature_size def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase ) lowerCAmelCase__ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5 ).float() lowerCAmelCase__ : Optional[int] =
# L460: finishes input prep (random binary long class_labels), then:
#  * get_config: MaskFormerConfig built from a tiny Swin backbone
#    (depths [1,1,1,1]) and a small DETR decoder sized by mask_feature_size;
#  * prepare_config_and_inputs_for_common: packs pixel_values/pixel_mask;
#  * check_output_hidden_state: asserts hidden-state list lengths match the
#    backbone depths and decoder layer count;
#  * start of create_and_check_maskformer_model: eval-mode forward with and
#    without output_hidden_states under torch.no_grad().
(torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long() lowerCAmelCase__ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]: with torch.no_grad(): lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of
# L461: shape assertion for the transformer-decoder last hidden state
# (batch, num_queries, mask_feature_size), presence checks for the
# pixel-decoder / encoder last hidden states, then
# create_and_check_maskformer_instance_segmentation_head_model: eval-mode
# forwards with and without labels, asserting mask logits at //4 spatial
# size, class logits with a +1 null class, and a scalar (shape [1]) loss.
# The trailing "@require_torch class" is the start of the next class.
the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : Dict = model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model( pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class
lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Dict = False __lowercase : Tuple = False __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : str = MaskFormerModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase_ ( self ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass 
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2 lowerCAmelCase__ : Any = { """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(), } lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = 
model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase_ ( self ) -> int: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Dict = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Tuple = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase__ : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase = 1e-4 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = torch.tensor( 
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : List[str] = prepare_img() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : Optional[int] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Tuple = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Union[str, Any] = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, 
-7.76_30E00, -5.12_63E00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Tuple = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) 
-> Optional[Any]: lowerCAmelCase__ : str = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase__ : Any = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
37
"""Tests for transformers.convert_graph_to_onnx (export, quantization, shape inference)."""

import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Stub module whose forward() arguments all appear in the tokenizer output, in order."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Stub module with an extra argument (`some_other_args`) interleaved between model inputs."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    # (model_name, model_kwargs) pairs exercised by the export/quantize tests.
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        """Export a locally-saved BERT model (not a hub identifier)."""
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Convert `model` to ONNX in a temp dir and return the output path; fail the test on error."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        """Check infer_shapes() reports the expected variables with batch/sequence dynamic axes."""
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        """Check ensure_valid_input() reorders/filters tokenizer outputs to match forward()'s signature."""
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
37
1
"""Testing suite for the PyTorch Swinv2 model."""

import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class SwinvaModelTester:
    """Builds small Swinv2 configs/inputs and runs shape checks for the test class below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # BUG FIX: the kwarg was previously spelled `path_norm`, so `patch_norm`
        # was silently never forwarded to the config.
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )


@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
37
'''simple docstring'''
from maths.prime_factors import prime_factors


def _SCREAMING_SNAKE_CASE(UpperCamelCase):
    """Return the Liouville lambda of ``UpperCamelCase``.

    The result is -1 when the number of prime factors (counted with
    multiplicity, as returned by ``prime_factors``) is odd and +1 when it
    is even.

    Raises:
        TypeError: if ``UpperCamelCase`` is not an integer.
        ValueError: if ``UpperCamelCase`` is not a positive integer.
    """
    # Fix: the original called isinstance(UpperCamelCase, UpperCamelCase),
    # passing the value itself as the "type" argument (itself a TypeError at
    # the isinstance call), built a message f-string referencing the
    # undefined name `number`, and then raised TypeError with the raw input
    # instead of that message.
    if not isinstance(UpperCamelCase, int):
        lowerCAmelCase__: str = f"""Input value of [number={UpperCamelCase}] must be an integer"""
        raise TypeError(lowerCAmelCase__)
    if UpperCamelCase < 1:
        raise ValueError("""Input must be a positive integer""")
    # Liouville lambda: the parity of the prime-factor count decides the sign.
    return -1 if len(prime_factors(UpperCamelCase)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
'''simple docstring'''
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("""repo_id""", ["""canonical_dataset_name""", """org-name/dataset-name"""])
@pytest.mark.parametrize("""path""", ["""filename.csv""", """filename with blanks.csv"""])
@pytest.mark.parametrize("""revision""", [None, """v2"""])
def _SCREAMING_SNAKE_CASE(repo_id, path, revision):
    """Check that ``hf_hub_url`` builds the canonical dataset resolve URL.

    Fix: the original signature declared three parameters all named
    ``UpperCamelCase`` — a duplicate-argument SyntaxError — while the
    assertion body referenced ``repo_id`` and ``revision``.  The parameters
    are renamed to match both the ``pytest.mark.parametrize`` argnames
    (which must equal the test function's parameter names) and the body.
    """
    lowerCAmelCase__: str = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    # The file path is percent-quoted; a missing revision falls back to 'main'.
    assert lowerCAmelCase__ == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"""
37
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''AI-Sweden/gpt-sw3-126m''': 2048, '''AI-Sweden/gpt-sw3-350m''': 2048, '''AI-Sweden/gpt-sw3-1.6b''': 2048, '''AI-Sweden/gpt-sw3-6.7b''': 2048, '''AI-Sweden/gpt-sw3-20b''': 2048, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None: lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" ) if 
name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase__ : Tuple = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token else: lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = do_lower_case lowerCAmelCase__ : Dict = remove_space lowerCAmelCase__ : Optional[Any] = keep_accents lowerCAmelCase__ : int = vocab_file lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase__ : List[str] = re.compile( F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) -> Any: lowerCAmelCase__ : int = self.__dict__.copy() lowerCAmelCase__ : Optional[int] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCAmelCase_ ( self ) -> int: return len(self.sp_model ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase ) # Normalize whitespaces lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase ) return text def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase ) return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.IdToPiece(__UpperCAmelCase ) @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> str: return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : int = [] lowerCAmelCase__ : Optional[int] = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using 
sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Optional[Any] = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ : Any = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string def UpperCAmelCase_ ( self ) -> Dict[str, int]: lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : str = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase ) else: lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text] lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase ) if return_tensors is True or return_tensors == 
"pt": lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase ) return token_ids def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.decode(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]: lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] lowerCAmelCase__ : Any = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=__UpperCAmelCase )
37
1
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase="" ,__UpperCAmelCase="train" ) -> Optional[int]: assert os.path.isdir(__UpperCAmelCase ) lowerCAmelCase__ : Any = [] lowerCAmelCase__ : List[Any] = os.listdir(__UpperCAmelCase ) for story_filename in story_filenames_list: if "summary" in story_filename: continue lowerCAmelCase__ : Union[str, Any] = os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) if not os.path.isfile(__UpperCAmelCase ): continue self.documents.append(__UpperCAmelCase ) def __len__( self ) -> Optional[Any]: return len(self.documents ) def __getitem__( self ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Tuple = self.documents[idx] lowerCAmelCase__ : List[str] = document_path.split("""/""" )[-1] with open(__UpperCAmelCase ,encoding="""utf-8""" ) as source: lowerCAmelCase__ : Tuple = source.read() lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = process_story(__UpperCAmelCase ) return document_name, story_lines, summary_lines def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = list(filter(lambda UpperCamelCase : len(UpperCamelCase ) != 0 , [line.strip() for line in raw_story.split("""\n""" )] ) ) # for some unknown reason some lines miss a period, add it lowerCAmelCase__ : List[Any] = [_add_missing_period(UpperCamelCase ) for line in nonempty_lines] # gather article lines lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : List[Any] = deque(UpperCamelCase ) while True: try: lowerCAmelCase__ : Union[str, Any] = lines.popleft() if element.startswith("""@highlight""" ): break story_lines.append(UpperCamelCase ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. 
return story_lines, [] # gather summary lines lowerCAmelCase__ : List[str] = list(filter(lambda UpperCamelCase : not t.startswith("""@highlight""" ) , UpperCamelCase ) ) return story_lines, summary_lines def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : int = [""".""", """!""", """?""", """...""", """'""", """`""", """\"""", """\u2019""", """\u2019""", """)"""] if line.startswith("""@highlight""" ): return line if line[-1] in END_TOKENS: return line return line + "." def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" if len(UpperCamelCase ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(UpperCamelCase )) ) return sequence def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[Any] = torch.ones_like(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = sequence == pad_token_id lowerCAmelCase__ : Dict = 0 return mask def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = [tokenizer.encode(UpperCamelCase ) for line in story_lines] lowerCAmelCase__ : Tuple = [token for sentence in story_lines_token_ids for token in sentence] lowerCAmelCase__ : Optional[int] = [tokenizer.encode(UpperCamelCase ) for line in summary_lines] lowerCAmelCase__ : List[str] = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[Any] = [] for sequence in batch: lowerCAmelCase__ : Optional[int] = -1 lowerCAmelCase__ : Optional[Any] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(UpperCamelCase ) return torch.tensor(UpperCamelCase )
37
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house lowerCAmelCase__ : str = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim lowerCAmelCase__ : Dict = torch.tensor( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,__UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) ) @slow def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) lowerCAmelCase__ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house lowerCAmelCase__ : Dict = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim lowerCAmelCase__ : Union[str, Any] = torch.tensor( [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, 
-0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,__UpperCAmelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )
37
1
'''simple docstring''' import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> Tuple: super().__init__() lowerCAmelCase__ : Union[str, Any] = value_function lowerCAmelCase__ : List[Any] = unet lowerCAmelCase__ : Any = scheduler lowerCAmelCase__ : int = env lowerCAmelCase__ : Union[str, Any] = env.get_dataset() lowerCAmelCase__ : Tuple = {} for key in self.data.keys(): try: lowerCAmelCase__ : int = self.data[key].mean() except: # noqa: E722 pass lowerCAmelCase__ : List[Any] = {} for key in self.data.keys(): try: lowerCAmelCase__ : Any = self.data[key].std() except: # noqa: E722 pass lowerCAmelCase__ : List[str] = env.observation_space.shape[0] lowerCAmelCase__ : List[str] = env.action_space.shape[0] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: return (x_in - self.means[key]) / self.stds[key] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> str: return x_in * self.stds[key] + self.means[key] def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[Any]: if type(__UpperCAmelCase ) is dict: return {k: self.to_torch(__UpperCAmelCase ) for k, v in x_in.items()} elif torch.is_tensor(__UpperCAmelCase ): return x_in.to(self.unet.device ) return torch.tensor(__UpperCAmelCase ,device=self.unet.device ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]: for key, val in cond.items(): lowerCAmelCase__ : List[Any] = val.clone() return x_in def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Optional[int] = x.shape[0] lowerCAmelCase__ : int = None for i in 
tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowerCAmelCase__ : Dict = torch.full((batch_size,) ,__UpperCAmelCase ,device=self.unet.device ,dtype=torch.long ) for _ in range(__UpperCAmelCase ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowerCAmelCase__ : str = self.value_function(x.permute(0 ,2 ,1 ) ,__UpperCAmelCase ).sample lowerCAmelCase__ : int = torch.autograd.grad([y.sum()] ,[x] )[0] lowerCAmelCase__ : Tuple = self.scheduler._get_variance(__UpperCAmelCase ) lowerCAmelCase__ : str = torch.exp(0.5 * posterior_variance ) lowerCAmelCase__ : Union[str, Any] = model_std * grad lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : Tuple = x.detach() lowerCAmelCase__ : str = x + scale * grad lowerCAmelCase__ : Optional[Any] = self.reset_xa(__UpperCAmelCase ,__UpperCAmelCase ,self.action_dim ) lowerCAmelCase__ : Union[str, Any] = self.unet(x.permute(0 ,2 ,1 ) ,__UpperCAmelCase ).sample.permute(0 ,2 ,1 ) # TODO: verify deprecation of this kwarg lowerCAmelCase__ : Union[str, Any] = self.scheduler.step(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,predict_epsilon=__UpperCAmelCase )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) lowerCAmelCase__ : Dict = self.reset_xa(__UpperCAmelCase ,__UpperCAmelCase ,self.action_dim ) lowerCAmelCase__ : Any = self.to_torch(__UpperCAmelCase ) return x, y def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=64 ,__UpperCAmelCase=32 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.1 ) -> Optional[int]: # normalize the observations and create batch dimension lowerCAmelCase__ : int = self.normalize(__UpperCAmelCase ,"""observations""" ) lowerCAmelCase__ : Dict = obs[None].repeat(__UpperCAmelCase ,axis=0 ) lowerCAmelCase__ : Dict = {0: self.to_torch(__UpperCAmelCase )} lowerCAmelCase__ : Union[str, Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our 
conditions (to make the trajectories start at current state) lowerCAmelCase__ : Any = randn_tensor(__UpperCAmelCase ,device=self.unet.device ) lowerCAmelCase__ : int = self.reset_xa(__UpperCAmelCase ,__UpperCAmelCase ,self.action_dim ) lowerCAmelCase__ : Optional[Any] = self.to_torch(__UpperCAmelCase ) # run the diffusion process lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.run_diffusion(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) # sort output trajectories by value lowerCAmelCase__ : Optional[Any] = y.argsort(0 ,descending=__UpperCAmelCase ).squeeze() lowerCAmelCase__ : Tuple = x[sorted_idx] lowerCAmelCase__ : Optional[Any] = sorted_values[:, :, : self.action_dim] lowerCAmelCase__ : Any = actions.detach().cpu().numpy() lowerCAmelCase__ : Optional[Any] = self.de_normalize(__UpperCAmelCase ,key="""actions""" ) # select the action with the highest value if y is not None: lowerCAmelCase__ : str = 0 else: # if we didn't run value guiding, select a random action lowerCAmelCase__ : str = np.random.randint(0 ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = denorm_actions[selected_index, 0] return denorm_actions
37
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging _lowerCAmelCase = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: with open(UpperCamelCase , """rb""" ) as flax_state_f: lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase , flax_state_f.read() ) except UnpicklingError as e: try: with open(UpperCamelCase ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase : x.dtype == jnp.bfloataa , UpperCamelCase ) ).values() if any(UpperCamelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCAmelCase__ : Dict = jax.tree_util.tree_map( lambda UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase ) lowerCAmelCase__ : Any = """""" lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase , sep=""".""" ) lowerCAmelCase__ : Optional[int] = pt_model.state_dict() # keep track of unexpected & missing keys lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : int = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase__ : Any = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(UpperCamelCase ): lowerCAmelCase__ : List[str] = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict lowerCAmelCase__ : int = np.asarray(UpperCamelCase ) if not isinstance(UpperCamelCase , np.ndarray ) else flax_tensor lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase ) # remove from missing keys missing_keys.remove(UpperCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(UpperCamelCase ) pt_model.load_state_dict(UpperCamelCase ) # re-transform missing_keys to list lowerCAmelCase__ : Optional[int] = list(UpperCamelCase ) if len(UpperCamelCase ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(UpperCamelCase ) > 0: logger.warning( f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" """ use it for predictions and inference.""" ) return pt_model
37
1
'''simple docstring''' import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 _lowerCAmelCase = get_tests_dir('''fixtures/dummy-config.json''') class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : Any = 0 def UpperCAmelCase_ ( self ) -> List[Any]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def UpperCAmelCase_ ( self ) -> List[str]: lowerCAmelCase__ : Dict = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Tuple = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : Optional[Any] = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[Any]: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
lowerCAmelCase__ : List[str] = os.path.join(__UpperCAmelCase ,"""fake-roberta""" ) os.makedirs(__UpperCAmelCase ,exist_ok=__UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase ,"""config.json""" ) ,"""w""" ) as f: f.write(json.dumps({} ) ) lowerCAmelCase__ : int = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertEqual(type(__UpperCAmelCase ) ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[Any]: try: AutoConfig.register("""custom""" ,__UpperCAmelCase ) # Wrong model type will raise an error with self.assertRaises(__UpperCAmelCase ): AutoConfig.register("""model""" ,__UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__UpperCAmelCase ): AutoConfig.register("""bert""" ,__UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase__ : Any = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : int = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase ,__UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def UpperCAmelCase_ ( self ) -> List[Any]: with self.assertRaisesRegex( __UpperCAmelCase ,"""bert-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase__ : List[Any] = AutoConfig.from_pretrained("""bert-base""" ) def UpperCAmelCase_ ( self ) -> List[str]: with self.assertRaisesRegex( __UpperCAmelCase ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase__ : str = AutoConfig.from_pretrained(__UpperCAmelCase ,revision="""aaaaaa""" ) def UpperCAmelCase_ ( self ) -> Dict: with self.assertRaisesRegex( __UpperCAmelCase ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,): lowerCAmelCase__ : List[Any] = 
AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def UpperCAmelCase_ ( self ) -> List[str]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase ,trust_remote_code=__UpperCAmelCase ) self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Union[str, Any] = '''new-model''' try: AutoConfig.register("""new-model""" ,__UpperCAmelCase ) # If remote code is not set, the default is to use local lowerCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. 
lowerCAmelCase__ : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub lowerCAmelCase__ : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=__UpperCAmelCase ) self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
37
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str: lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Dict = use_auxiliary_loss lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : List[str] = min_size lowerCAmelCase__ : int = max_size lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = mask_feature_size def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase ) lowerCAmelCase__ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5 ).float() lowerCAmelCase__ : Optional[int] = 
(torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long() lowerCAmelCase__ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]: with torch.no_grad(): lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of 
the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : Dict = model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model( pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class 
lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Dict = False __lowercase : Tuple = False __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : str = MaskFormerModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase_ ( self ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass 
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2 lowerCAmelCase__ : Any = { """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(), } lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = 
model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase_ ( self ) -> int: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Dict = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Tuple = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase__ : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase = 1e-4 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = torch.tensor( 
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : List[str] = prepare_img() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : Optional[int] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Tuple = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Union[str, Any] = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, 
-7.76_30E00, -5.12_63E00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Tuple = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) 
-> Optional[Any]: lowerCAmelCase__ : str = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase__ : Any = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
37
1
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _lowerCAmelCase = 0 _lowerCAmelCase = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _lowerCAmelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _lowerCAmelCase = tuple[int, int] class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> None: lowerCAmelCase__ : List[str] = pos_x lowerCAmelCase__ : Any = pos_y lowerCAmelCase__ : Union[str, Any] = (pos_y, pos_x) lowerCAmelCase__ : Optional[Any] = goal_x lowerCAmelCase__ : int = goal_y lowerCAmelCase__ : int = g_cost lowerCAmelCase__ : int = parent lowerCAmelCase__ : Union[str, Any] = self.calculate_heuristic() lowerCAmelCase__ : str = self.g_cost + self.h_cost def UpperCAmelCase_ ( self ) -> float: lowerCAmelCase__ : Union[str, Any] = self.pos_x - self.goal_x lowerCAmelCase__ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__UpperCAmelCase ) + abs(__UpperCAmelCase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self ,__UpperCAmelCase ) -> bool: return self.f_cost < other.f_cost class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : int = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9999 ,__UpperCAmelCase ) lowerCAmelCase__ : int = [self.start] lowerCAmelCase__ : list[Node] = [] lowerCAmelCase__ : int = False def UpperCAmelCase_ ( self ) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCAmelCase__ : Optional[Any] = self.open_nodes.pop(0 ) 
if current_node.pos == self.target.pos: return self.retrace_path(__UpperCAmelCase ) self.closed_nodes.append(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = self.get_successors(__UpperCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__UpperCAmelCase ) else: # retrieve the best current path lowerCAmelCase__ : str = self.open_nodes.pop(self.open_nodes.index(__UpperCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__UpperCAmelCase ) else: self.open_nodes.append(__UpperCAmelCase ) return [self.start.pos] def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> list[Node]: lowerCAmelCase__ : List[Any] = [] for action in delta: lowerCAmelCase__ : Union[str, Any] = parent.pos_x + action[1] lowerCAmelCase__ : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __UpperCAmelCase ,__UpperCAmelCase ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,__UpperCAmelCase ,) ) return successors def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> list[TPosition]: lowerCAmelCase__ : Dict = node lowerCAmelCase__ : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCAmelCase__ : List[Any] = current_node.parent path.reverse() return path class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None: lowerCAmelCase__ : str = AStar(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = AStar(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : Tuple = False def UpperCAmelCase_ ( self ) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowerCAmelCase__ : Optional[int] = self.fwd_astar.open_nodes.pop(0 ) 
lowerCAmelCase__ : Any = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __UpperCAmelCase ,__UpperCAmelCase ) self.fwd_astar.closed_nodes.append(__UpperCAmelCase ) self.bwd_astar.closed_nodes.append(__UpperCAmelCase ) lowerCAmelCase__ : Dict = current_bwd_node lowerCAmelCase__ : Optional[Any] = current_fwd_node lowerCAmelCase__ : int = { self.fwd_astar: self.fwd_astar.get_successors(__UpperCAmelCase ), self.bwd_astar: self.bwd_astar.get_successors(__UpperCAmelCase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__UpperCAmelCase ) else: # retrieve the best current path lowerCAmelCase__ : Any = astar.open_nodes.pop( astar.open_nodes.index(__UpperCAmelCase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__UpperCAmelCase ) else: astar.open_nodes.append(__UpperCAmelCase ) return [self.fwd_astar.start.pos] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> list[TPosition]: lowerCAmelCase__ : List[Any] = self.fwd_astar.retrace_path(__UpperCAmelCase ) lowerCAmelCase__ : Any = self.bwd_astar.retrace_path(__UpperCAmelCase ) bwd_path.pop() bwd_path.reverse() lowerCAmelCase__ : int = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _lowerCAmelCase = (0, 0) _lowerCAmelCase = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _lowerCAmelCase = time.time() _lowerCAmelCase = AStar(init, goal) _lowerCAmelCase = a_star.search() _lowerCAmelCase = time.time() - start_time print(F"""AStar execution time = {end_time:f} seconds""") _lowerCAmelCase = time.time() _lowerCAmelCase = BidirectionalAStar(init, goal) _lowerCAmelCase = time.time() - bd_start_time print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
37
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = '''focalnet''' def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = patch_size lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : List[str] = use_conv_embed lowerCAmelCase__ : List[Any] = hidden_sizes lowerCAmelCase__ : Dict = depths lowerCAmelCase__ : List[str] = focal_levels lowerCAmelCase__ : List[str] = focal_windows lowerCAmelCase__ : Dict = hidden_act lowerCAmelCase__ : Dict = mlp_ratio lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Tuple = drop_path_rate lowerCAmelCase__ : Dict = use_layerscale lowerCAmelCase__ : Optional[Any] = layerscale_value lowerCAmelCase__ : str = use_post_layernorm lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation lowerCAmelCase__ : int = normalize_modulator lowerCAmelCase__ : Optional[Any] = 
initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : List[Any] = encoder_stride lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
37
1
'''simple docstring''' import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): _lowerCAmelCase = { '''linear''': PIL.Image.Resampling.BILINEAR, '''bilinear''': PIL.Image.Resampling.BILINEAR, '''bicubic''': PIL.Image.Resampling.BICUBIC, '''lanczos''': PIL.Image.Resampling.LANCZOS, '''nearest''': PIL.Image.Resampling.NEAREST, } else: _lowerCAmelCase = { '''linear''': PIL.Image.LINEAR, '''bilinear''': PIL.Image.BILINEAR, '''bicubic''': PIL.Image.BICUBIC, '''lanczos''': PIL.Image.LANCZOS, '''nearest''': PIL.Image.NEAREST, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase__ : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCAmelCase__ : Optional[int] = numpy_to_pil(UpperCamelCase ) return images def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if images.ndim == 3: lowerCAmelCase__ : List[str] = images[None, ...] lowerCAmelCase__ : int = (images * 255).round().astype("""uint8""" ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCAmelCase__ : int = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images] else: lowerCAmelCase__ : Tuple = [Image.fromarray(UpperCamelCase ) for image in images] return pil_images
37
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''), ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''), ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''), ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''), ('''input_blocks.0.0.weight''', '''conv_in.weight'''), ('''input_blocks.0.0.bias''', '''conv_in.bias'''), ('''out.0.weight''', '''conv_norm_out.weight'''), ('''out.0.bias''', '''conv_norm_out.bias'''), ('''out.2.weight''', '''conv_out.weight'''), ('''out.2.bias''', '''conv_out.bias'''), ] _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''in_layers.0''', '''norm1'''), ('''in_layers.2''', '''conv1'''), ('''out_layers.0''', '''norm2'''), ('''out_layers.3''', '''conv2'''), ('''emb_layers.1''', '''time_emb_proj'''), ('''skip_connection''', '''conv_shortcut'''), ] _lowerCAmelCase = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. 
for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks _lowerCAmelCase = F"""down_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 _lowerCAmelCase = F"""down_blocks.{i}.attentions.{j}.""" _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks _lowerCAmelCase = F"""up_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 _lowerCAmelCase = F"""up_blocks.{i}.attentions.{j}.""" _lowerCAmelCase = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.conv.""" _lowerCAmelCase = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0.""" _lowerCAmelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) _lowerCAmelCase = '''mid_block.attentions.0.''' _lowerCAmelCase = '''middle_block.1.''' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): _lowerCAmelCase = F"""mid_block.resnets.{j}.""" _lowerCAmelCase = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Any = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: 
lowerCAmelCase__ : Optional[int] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowerCAmelCase__ : Any = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowerCAmelCase__ : List[Any] = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = v lowerCAmelCase__ : Tuple = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''nin_shortcut''', '''conv_shortcut'''), ('''norm_out''', '''conv_norm_out'''), ('''mid.attn_1.''', '''mid_block.attentions.0.'''), ] for i in range(4): # down_blocks have two resnets for j in range(2): _lowerCAmelCase = F"""encoder.down_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.""" _lowerCAmelCase = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0.""" _lowerCAmelCase = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): _lowerCAmelCase = F"""decoder.up_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): _lowerCAmelCase = F"""mid_block.resnets.{i}.""" _lowerCAmelCase = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''norm.''', '''group_norm.'''), 
('''q.''', '''query.'''), ('''k.''', '''key.'''), ('''v.''', '''value.'''), ('''proj_out.''', '''proj_attn.'''), ] def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return w.reshape(*w.shape , 1 , 1 ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowerCAmelCase__ : str = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowerCAmelCase__ : Dict = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = v lowerCAmelCase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()} lowerCAmelCase__ : Tuple = ["""q""", """k""", """v""", """proj_out"""] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) lowerCAmelCase__ : Optional[int] = reshape_weight_for_sd(UpperCamelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''resblocks.''', '''text_model.encoder.layers.'''), ('''ln_1''', '''layer_norm1'''), ('''ln_2''', '''layer_norm2'''), ('''.c_fc.''', '''.fc1.'''), ('''.c_proj.''', '''.fc2.'''), ('''.attn''', '''.self_attn'''), ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''), ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''), ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''), ] _lowerCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} _lowerCAmelCase = re.compile('''|'''.join(protected.keys())) # Ordering is from 
https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp _lowerCAmelCase = {'''q''': 0, '''k''': 1, '''v''': 2} def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = {} lowerCAmelCase__ : int = {} lowerCAmelCase__ : List[Any] = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): lowerCAmelCase__ : Optional[int] = k[: -len(""".q_proj.weight""" )] lowerCAmelCase__ : Tuple = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: lowerCAmelCase__ : List[Any] = [None, None, None] lowerCAmelCase__ : Dict = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): lowerCAmelCase__ : str = k[: -len(""".q_proj.bias""" )] lowerCAmelCase__ : List[str] = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: lowerCAmelCase__ : Union[str, Any] = [None, None, None] lowerCAmelCase__ : Any = v continue lowerCAmelCase__ : Dict = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCAmelCase__ : Any = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : Tuple = torch.cat(UpperCamelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCAmelCase__ : str = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase ) return new_state_dict def 
_SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return text_enc_dict if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.''' ) _lowerCAmelCase = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''') _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''') _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): _lowerCAmelCase = load_file(unet_path, device='''cpu''') else: _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''') _lowerCAmelCase = torch.load(unet_path, map_location='''cpu''') if osp.exists(vae_path): _lowerCAmelCase = load_file(vae_path, device='''cpu''') else: _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''') _lowerCAmelCase = torch.load(vae_path, map_location='''cpu''') if osp.exists(text_enc_path): _lowerCAmelCase = load_file(text_enc_path, device='''cpu''') else: _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''') _lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''') # Convert the UNet model _lowerCAmelCase = 
convert_unet_state_dict(unet_state_dict) _lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model _lowerCAmelCase = convert_vae_state_dict(vae_state_dict) _lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper _lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm _lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()} _lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict) _lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()} else: _lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict) _lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint _lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: _lowerCAmelCase = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: _lowerCAmelCase = {'''state_dict''': state_dict} torch.save(state_dict, args.checkpoint_path)
37
1
"""Deprecated feature-extractor alias for DeiT; use `DeiTImageProcessor` instead."""
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    """Backward-compatibility shim: behaves exactly like `DeiTImageProcessor`
    but emits a `FutureWarning` on construction."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn with FutureWarning as the category; forwarding *args as the
        # second positional argument would pass a tuple where the warning
        # class belongs.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
37
"""BLEURT metric for the `datasets` library, wrapping google-research/bleurt."""
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):
    """BLEURT metric: downloads the checkpoint named by `config_name` and
    scores candidate sentences against references."""

    def _info(self):
        # Metric metadata required by the `datasets.Metric` interface.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            # Must rewrite config_name (not a throwaway local) so the
            # checkpoint lookup below resolves for the default config.
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        # BLEURT scores candidates against references; returns one score per pair.
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
37
1
"""Produce the photographic negative of an image."""


def convert_to_negative(img):
    """Invert every pixel of *img* in place and return it.

    Assumes *img* is a numpy array as returned by ``cv2.imread``
    (H x W x 3, values in 0..255) — TODO confirm with callers.
    """
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # OpenCV is only needed for the demo below; importing it lazily keeps
    # `convert_to_negative` usable without cv2 installed.
    from cv2 import destroyAllWindows, imread, imshow, waitKey

    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
37
"""Conversational pipeline and its `Conversation` input/output container."""
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """Holds one conversation: the pending user input, past user inputs and
    generated responses. Identified by a UUID so two conversations compare
    equal when they share an id."""

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue *text* as the next user input; refuses to clobber an
        unprocessed input unless *overwrite* is set."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model response in the history."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs in chronological order, including the
        pending user input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversation pipeline. Consumes `Conversation` objects and
    appends the model's response to them."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # Generation needs a pad token; fall back to EOS.
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        # Unwrap single-conversation batches for convenience.
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            # Trim the oldest tokens so at least `minimum_tokens` remain for the answer.
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        # Concatenate each turn, separated by EOS when the tokenizer has one,
        # keeping at most `model_max_length` trailing tokens.
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
37
1
"""Pearson correlation coefficient metric for the `datasets` library."""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):
    """Pearson correlation metric wrapping `scipy.stats.pearsonr`."""

    def _info(self):
        # Metric metadata required by the `datasets.Metric` interface.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
37
"""Zero-shot image classification pipeline (CLIP-style image/text matching)."""
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase_(Pipeline):
    """Classifies an image against free-form candidate labels by scoring
    `hypothesis_template.format(label)` prompts against the image."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Highest-probability label first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
37
1
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

# Standard preprocessing: resize to 256x256, convert to tensor, scale to [-1, 1].
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Convert a PIL image (or list of PIL images) into a normalized batch tensor.

    A tensor input is returned unchanged so callers may pre-batch themselves.
    """
    if isinstance(image, torch.Tensor):
        return image
    if isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    return torch.stack(image)


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """DDIM img2img-style pipeline: noise an input image up to an intermediate
    timestep (controlled by ``strength``) and denoise it back with the unet.
    """

    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength) -> None:
        """Validate that ``strength`` lies in [0.0, 1.0]."""
        if strength < 0 or strength > 1:
            raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the tail of the scheduler's timesteps used for img2img.

        ``strength == 1.0`` keeps the full schedule; lower values skip the
        earliest (noisiest) steps.
        """
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Move the preprocessed image to the target device/dtype and noise it
        up to ``timestep`` with the scheduler.

        Raises:
            ValueError: if ``image`` has an unsupported type, or a list of
                generators does not match ``batch_size``.
        """
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"""
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents (debug print removed from library code)
        latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the noise-then-denoise comparative analysis.

        Returns an ``ImagePipelineOutput`` (or ``(images, latent_timestep)``
        tuple when ``return_dict`` is False).
        """
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
37
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    """Multi-node SageMaker integration test, parameterized per framework/script."""

    def setUp(self):
        # The glue example script must be copied into the SageMaker source dir.
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for the configured script/instance."""
        job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings; run_ddp.py manages distribution itself
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics as a CSV next to the test files."""
        TrainingJobAnalytics(job_name).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
37
1
'''simple docstring'''
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return ``(x, y)`` such that ``a*x + b*y == gcd(a, b)``.

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Solve ``x ≡ r_a (mod n_a)`` and ``x ≡ r_b (mod n_b)`` for coprime moduli.

    Uses one extended-Euclid call to get the Bézout coefficients.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    # normalize into [0, m) even when n is negative
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Same as :func:`chinese_remainder_theorem`, via two modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
37
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''albert-base-v1''': 512,
    '''albert-large-v1''': 512,
    '''albert-xlarge-v1''': 512,
    '''albert-xxlarge-v1''': 512,
    '''albert-base-v2''': 512,
    '''albert-large-v2''': 512,
    '''albert-xlarge-v2''': 512,
    '''albert-xxlarge-v2''': 512,
}

SPIECE_UNDERLINE = '''▁'''


class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based ALBERT tokenizer.

    Wraps a ``SentencePieceProcessor`` and adds ALBERT's text normalization
    (whitespace collapsing, accent stripping, lower-casing) plus the
    [CLS]/[SEP] special-token scheme.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Apply ALBERT's normalization (whitespace, quotes, accents, case)."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting pieces like ``9,`` so the
        trailing comma becomes its own piece."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Decode tokens to text, keeping special tokens out of the SP decoder."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )

        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
37
1
'''simple docstring'''
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    """A univariate polynomial with fixed degree, stored as a coefficient list
    ``coefficients[i]`` for the ``x**i`` term.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store the coefficients for a degree-``degree`` polynomial.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1."""
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: "Polynomial") -> "Polynomial":
        """Term-wise sum; the result has the larger of the two degrees."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: "Polynomial") -> "Polynomial":
        # a - b implemented as a + (-1) * b
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> "Polynomial":
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: "Polynomial") -> "Polynomial":
        """Full convolution product; result degree is the sum of degrees."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at ``substitution``."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        # highest-degree terms first; zero terms are skipped entirely
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> "Polynomial":
        """Return the first derivative as a new Polynomial of degree - 1."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> "Polynomial":
        """Return the antiderivative with integration constant ``constant``."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Polynomials are equal when degree and all coefficients match."""
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
37
'''simple docstring'''
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
    '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ctrl''': 256,
}

CONTROL_CODES = {
    '''Pregnancy''': 16_8629,
    '''Christianity''': 7675,
    '''Explain''': 10_6423,
    '''Fitness''': 6_3440,
    '''Saving''': 6_3163,
    '''Ask''': 2_7171,
    '''Ass''': 9_5985,
    '''Joke''': 16_3509,
    '''Questions''': 4_5622,
    '''Thoughts''': 4_9605,
    '''Retail''': 5_2342,
    '''Feminism''': 16_4338,
    '''Writing''': 1_1992,
    '''Atheism''': 19_2263,
    '''Netflix''': 4_8616,
    '''Computing''': 3_9639,
    '''Opinion''': 4_3213,
    '''Alone''': 4_4967,
    '''Funny''': 5_8917,
    '''Gaming''': 4_0358,
    '''Human''': 4088,
    '''India''': 1331,
    '''Joker''': 7_7138,
    '''Diet''': 3_6206,
    '''Legal''': 1_1859,
    '''Norman''': 4939,
    '''Tip''': 7_2689,
    '''Weight''': 5_2343,
    '''Movies''': 4_6273,
    '''Running''': 2_3425,
    '''Science''': 2090,
    '''Horror''': 3_7793,
    '''Confession''': 6_0572,
    '''Finance''': 1_2250,
    '''Politics''': 1_6360,
    '''Scary''': 19_1985,
    '''Support''': 1_2654,
    '''Technologies''': 3_2516,
    '''Teenage''': 6_6160,
    '''Event''': 3_2769,
    '''Learned''': 6_7460,
    '''Notion''': 18_2770,
    '''Wikipedia''': 3_7583,
    '''Books''': 6665,
    '''Extract''': 7_6050,
    '''Confessions''': 10_2701,
    '''Conspiracy''': 7_5932,
    '''Links''': 6_3674,
    '''Narcissus''': 15_0425,
    '''Relationship''': 5_4766,
    '''Relationships''': 13_4796,
    '''Reviews''': 4_1671,
    '''News''': 4256,
    '''Translation''': 2_6820,
    '''multilingual''': 12_8406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for Salesforce CTRL, using ``@@`` continuation markers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            merges = merges_handle.read().split("""\n""")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # memoizes bpe() results per surface token
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to one whitespace token; returns ``@@ ``-joined pieces."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # mark the final character as word-terminal for merge ranking
        word = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """@@ """.join(word)
        # drop the trailing "</w>" marker
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        split_tokens = []

        words = re.findall(R"""\S+\n?""", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(""" """)))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens and remove the ``@@ `` continuation markers."""
        out_string = """ """.join(tokens).replace("""@@ """, """""").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""]
        )

        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")

        index = 0
        with open(merge_file, """w""", encoding="""utf-8""") as writer:
            writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!"""
                    )
                    index = token_index
                writer.write(""" """.join(bpe_tokens) + """\n""")
                index += 1

        return vocab_file, merge_file
37
1
'''simple docstring'''


def join(separator: str, separated: list) -> str:
    """Join the strings in ``separated`` with ``separator`` between them.

    Unlike the previous implementation (append-then-``strip(separator)``),
    this does not corrupt words that start or end with separator characters.

    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    >>> join("#", ["a", "b", "c", 1])
    Traceback (most recent call last):
        ...
    Exception: join() accepts only strings to be joined

    Raises:
        Exception: if any element of ``separated`` is not a string.
    """
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("""join() accepts only strings to be joined""")
    return separator.join(separated)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
37
'''simple docstring'''


def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True if ``input_str`` contains every letter a-z at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """, """""")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Flag-array variant of :func:`is_pangram`.

    Fixed: the per-letter flags were never indexed (a throwaway local was
    assigned instead), so the function could not return a correct result.
    NOTE(review): assumes ASCII letters only — non-ASCII ``islower()``/
    ``isupper()`` characters would index out of range.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Set-comprehension variant: count distinct alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations on the default sentence."""
    from timeit import timeit

    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""", setup=setup))
    print(timeit("""is_pangram_faster()""", setup=setup))
    print(timeit("""is_pangram_fastest()""", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
        '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
        '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
        '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
        '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
        '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
        '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
        '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''albert-base-v1''': 512,
    '''albert-large-v1''': 512,
    '''albert-xlarge-v1''': 512,
    '''albert-xxlarge-v1''': 512,
    '''albert-base-v2''': 512,
    '''albert-large-v2''': 512,
    '''albert-xlarge-v2''': 512,
    '''albert-xxlarge-v2''': 512,
}

SPIECE_UNDERLINE = '''▁'''


class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based ALBERT tokenizer.

    Wraps a ``SentencePieceProcessor`` and adds ALBERT's text normalization
    (whitespace collapsing, accent stripping, lower-casing) plus the
    [CLS]/[SEP] special-token scheme.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Apply ALBERT's normalization (whitespace, quotes, accents, case)."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting pieces like ``9,`` so the
        trailing comma becomes its own piece."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Decode tokens to text, keeping special tokens out of the SP decoder."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )

        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
37
"""Three equivalent digit-sum implementations plus a timing benchmark.

All functions accept any int (negative values are handled via abs) and
return the sum of the decimal digits.
"""


def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of *n* using an iterative loop.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-99)
    18
    """
    n = abs(n)  # sign does not contribute a digit
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the decimal digits of *n*, computed recursively.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    # Base case: a single digit is its own digit sum.
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the decimal digits of *n* via string conversion.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each implementation on progressively larger inputs and print results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build the call expression as a string so timeit can evaluate it
        # in the __main__ namespace.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
"""Prim's minimum-spanning-tree algorithm backed by an indexed min-priority queue."""
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Return the heap index of the parent of the node at *position*."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the heap index of the left child of the node at *position*."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the heap index of the right child of the node at *position*."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Binary min-heap keyed by weight, with a position map for O(log n) update_key."""

    def __init__(self) -> None:
        # heap holds (element, weight) pairs; position_map tracks each
        # element's current index in the heap.
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        """Return True when the queue holds no elements."""
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        """Insert *elem* with the given *weight* and restore the heap invariant."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        """Remove and return the element with the smallest weight."""
        if self.elements > 1:
            # Move the minimum to the end so it can be popped in O(1).
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            # Re-heapify from the element that was swapped into the root.
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        """Change *elem*'s weight and move it up or down as required."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Move elem toward the root while it is lighter than its parent.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Move elem toward the leaves while a lighter child exists.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            # Prefer the right child only when it is strictly the lighter one.
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodea_pos_other: int) -> None:
        # Swap two heap slots and keep the position map consistent.
        node_a = self.heap[nodea_pos][0]
        node_b = self.heap[nodea_pos_other][0]
        self.heap[nodea_pos], self.heap[nodea_pos_other] = (
            self.heap[nodea_pos_other],
            self.heap[nodea_pos],
        )
        self.position_map[node_a] = nodea_pos_other
        self.position_map[node_b] = nodea_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a nested adjacency dict."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Add *node* to the graph if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, nodea: T, nodea_other: T, weight: int) -> None:
        """Add an undirected edge of the given *weight* between the two nodes."""
        self.add_node(nodea)
        self.add_node(nodea_other)
        self.connections[nodea][nodea_other] = weight
        self.connections[nodea_other][nodea] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm on *graph*.

    Returns (dist, parent) where dist[v] is the weight of the edge that
    connects v to the tree (0 for the start node, maxsize if unreachable)
    and parent[v] is the neighbour through which v joined the tree.
    """
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # Initialization: the first extracted node becomes the tree root.
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # Grow the tree one cheapest-edge node at a time.
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
37
"""Dump information about the local Python / ML environment for bug reports."""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# NOTE(review): the original assigned the bare constant '3' and left `os`
# unused — restoring the TF log-level suppression this value corresponds to
# (3 = errors only), set before TensorFlow is imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    # Torch is optional; report its absence instead of crashing.
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
37
1
"""Unit tests for datasets.utils.sharding helpers."""
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # One job per shard: each job gets a single-shard range.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """Shard ranges are spread as evenly as possible over at most max_num_jobs jobs."""
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """Splitting gen_kwargs partitions list-valued entries across jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list values of different lengths are ambiguous and must raise.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """The shard count is inferred from list-valued entries, or raises on conflict."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
37
"""XLM-RoBERTa-XL model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration for an XLM-RoBERTa-XL model.

    Stores the architecture hyper-parameters (vocab size, hidden size,
    layer/head counts, dropout rates, position-embedding settings, …).
    Defaults correspond to facebook/xlm-roberta-xl.
    """

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # Token ids are handled by the base class alongside any extra kwargs.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa-XL."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
37
1
"""Convert an original latent-diffusion (LDM) UNet checkpoint to the diffusers format."""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from a dotted key.

    Positive n removes that many leading segments; negative n keeps only
    the leading |n| segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old resnet parameter names to diffusers names.

    Returns a list of {"old": ..., "new": ...} dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old attention parameter names to diffusers names.

    Returns a list of {"old": ..., "new": ...} dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from old_checkpoint into checkpoint under their new names.

    attention_paths_to_split maps a fused qkv key to the three target keys
    its tensor is split into; additional_replacements are extra old→new
    substring substitutions applied to every new path.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            # Biases are 1-D (rank < 3) and flatten to (-1,); weights keep channels.
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert a full LDM UNet state dict to the diffusers UNet2DModel layout."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A bare ".0.op" layer is a downsampler conv, not a resnet.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
37
"""Shannon entropy of single characters and character pairs in a text."""
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print first-order entropy, second-order entropy, and their difference.

    Only lowercase ascii letters and the space character contribute; the
    three values are printed rounded to one decimal place.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0.0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in *text*.

    Returns (single_char_counts, two_char_counts); a leading space is
    prepended for the first pair so every character starts one bigram.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests (entropy demo text kept below for reference)."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. ..."
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
37
1
"""Logistic Regression from scratch, fitted by batch gradient descent."""

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Logistic (sigmoid) activation: maps any real value into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy loss between predictions ``h`` and labels ``y``."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of labels ``y`` under a linear-logistic model."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit logistic-regression weights by batch gradient descent.

    :param alpha: learning rate
    :param x: feature matrix of shape (n_samples, n_features)
    :param y: binary label vector of length n_samples
    :param max_iterations: number of gradient steps (default 70000)
    :return: learned weight vector ``theta``
    """
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        """Predicted probability P(y=1 | x) from the fitted model."""
        # BUG FIX: the obfuscated source computed np.dot(x, x); the model
        # prediction must use the learned weight vector ``theta``.
        return sigmoid_function(np.dot(x, theta))

    # Plot the data points and the decision boundary at probability 0.5.
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
37
"""Convert an original latent-diffusion (LDM) UNet checkpoint to diffusers format."""

import argparse
import json

import torch


def shave_segments(path, n_shave_prefix_segments=1):
    """Drop segments from the start (n >= 0) or end (n < 0) of a dot-path."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM resnet parameter names to the new diffusers names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old LDM attention parameter names to the new diffusers names."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy weights from ``old_checkpoint`` into ``checkpoint`` following ``paths``.

    ``attention_paths_to_split`` maps a fused qkv key to the three target keys;
    those tensors are split into query/key/value before assignment.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert an LDM UNet state dict to the diffusers parameter layout."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A block containing an "op" is a downsampler, not a resnet.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    # Imported here (not at module top) so the pure conversion helpers can be
    # used without diffusers installed; the CLI still behaves identically.
    from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel

    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
37
1
"""Slow integration tests for the DeeBERT research example (run_glue_deebert)."""

import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    """Return the value of the ``-f`` flag (pytest passes the test file path)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        # Mirror the example's logging to stdout so failures are debuggable.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run run_glue_deebert.main() with ``args`` and check every metric >= 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
37
"""A small number-theory library: primality, sieves, factorization, gcd/lcm,
Goldbach pairs, divisors, fractions, factorial and Fibonacci."""

from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: all primes from 2 to ``n`` (inclusive)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """All primes from 2 to ``n`` (inclusive) by repeated primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of ``number`` as an ascending list of factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # Exact integer division (the obfuscated source used '/=',
                # turning 'quotient' into a float and risking precision loss).
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of ``number``."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of ``number``."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """First pair of primes summing to the even ``number`` (> 2)."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor by the Euclidean algorithm."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 0) and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple ('kleinstes gemeinsames Vielfaches') of the inputs."""
    assert (
        isinstance(number1, int) and isinstance(number2, int) and (number1 >= 1) and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """The ``n``-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """All primes strictly between the two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """All positive divisors of ``n`` (including 1 and ``n``)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list) and (divisors[0] == 1) and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce ``numerator/denominator`` by their greatest common divisor."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Iterative factorial of ``n`` (n >= 0)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Iterative Fibonacci: fib(1) == fib(2) == 1, fib(3) == 2, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
37
1
"""FocalNet model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a FocalNet model.

    Instantiating a configuration with the defaults yields a configuration
    similar to the microsoft/focalnet-tiny architecture. All arguments are
    stored verbatim as attributes; ``out_features``/``out_indices`` are
    aligned against the derived ``stage_names`` for backbone use.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One "stem" stage plus one named stage per depth entry.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
37
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _lowerCAmelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _lowerCAmelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _lowerCAmelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> 
references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" return float((preds == labels).mean() ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ): """simple docstring""" lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = {} for id_pred, label in zip(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" lowerCAmelCase__ : Dict = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCAmelCase__ : Optional[int] = [(pred, label)] lowerCAmelCase__ , lowerCAmelCase__ : int = [], [] for question, preds_labels in question_map.items(): lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase ) lowerCAmelCase__ : List[Any] = fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" ) fas.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) ) ems.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) ) lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase ) 
lowerCAmelCase__ : Dict = float(fa_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,) def UpperCAmelCase_ ( self ) -> str: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: if self.config_name == "axb": return {"matthews_correlation": 
matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" ) elif self.config_name == "record": lowerCAmelCase__ : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
37
1
'''simple docstring''' from math import sqrt def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 10001 ): """simple docstring""" lowerCAmelCase__ : Dict = 0 lowerCAmelCase__ : Optional[int] = 1 while count != nth and number < 3: number += 1 if is_prime(UpperCamelCase ): count += 1 while count != nth: number += 2 if is_prime(UpperCamelCase ): count += 1 return number if __name__ == "__main__": print(F"""{solution() = }""")
37
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: return None class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: return None class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' __lowercase : Dict = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def UpperCAmelCase_ ( self ) -> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Any: from transformers import BertModel lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__UpperCAmelCase ) ) vocab_file.flush() lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) ) model.save_pretrained(__UpperCAmelCase ) 
self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase ) @require_tf @slow def UpperCAmelCase_ ( self ) -> List[str]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def UpperCAmelCase_ ( self ) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]: try: # Compute path with TemporaryDirectory() as tempdir: lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) return path except Exception as e: self.fail(__UpperCAmelCase ) @require_torch @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: from transformers import BertModel lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase 
,__UpperCAmelCase ,"""pt""" ) @require_tf @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Optional[int]: from transformers import TFBertModel lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase ) # Assert all variables are present self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should 
have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCAmelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCAmelCase ) ,1 ) self.assertEqual(len(__UpperCAmelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] ,"""input_ids""" ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Dict = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
37
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCAmelCase = { '''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''], '''tokenization_mvp''': ['''MvpTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = ['''MvpTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ '''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MvpForCausalLM''', '''MvpForConditionalGeneration''', '''MvpForQuestionAnswering''', '''MvpForSequenceClassification''', '''MvpModel''', '''MvpPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
37
'''simple docstring''' from maths.prime_factors import prime_factors def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
37
1
'''simple docstring''' _lowerCAmelCase = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': 
'''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
37
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''AI-Sweden/gpt-sw3-126m''': 2048, '''AI-Sweden/gpt-sw3-350m''': 2048, '''AI-Sweden/gpt-sw3-1.6b''': 2048, '''AI-Sweden/gpt-sw3-6.7b''': 2048, '''AI-Sweden/gpt-sw3-20b''': 2048, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None: lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" ) if 
name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase__ : Tuple = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token else: lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = do_lower_case lowerCAmelCase__ : Dict = remove_space lowerCAmelCase__ : Optional[Any] = keep_accents lowerCAmelCase__ : int = vocab_file lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase__ : List[str] = re.compile( F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) -> Any: lowerCAmelCase__ : int = self.__dict__.copy() lowerCAmelCase__ : Optional[int] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCAmelCase_ ( self ) -> int: return len(self.sp_model ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase ) # Normalize whitespaces lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase ) return text def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase ) return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.IdToPiece(__UpperCAmelCase ) @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> str: return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : int = [] lowerCAmelCase__ : Optional[int] = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using 
sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Optional[Any] = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ : Any = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string def UpperCAmelCase_ ( self ) -> Dict[str, int]: lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : str = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase ) else: lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text] lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase ) if return_tensors is True or return_tensors == 
"pt": lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase ) return token_ids def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.decode(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]: lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] lowerCAmelCase__ : Any = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=__UpperCAmelCase )
37
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : int = 0 # if input_string is "aba" than new_input_string become "a|b|a" lowerCAmelCase__ : Union[str, Any] = """""" lowerCAmelCase__ : Tuple = """""" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(UpperCamelCase ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = 0, 0 # length[i] shows the length of palindromic substring with center i lowerCAmelCase__ : str = [1 for i in range(len(UpperCamelCase ) )] # for each character in new_string find corresponding palindromic string lowerCAmelCase__ : Optional[int] = 0 for j in range(len(UpperCamelCase ) ): lowerCAmelCase__ : Tuple = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(UpperCamelCase ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 lowerCAmelCase__ : str = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: lowerCAmelCase__ : Tuple = j - k + 1 # noqa: E741 lowerCAmelCase__ : Dict = j + k - 1 # update max_length and start position if max_length < length[j]: lowerCAmelCase__ : List[str] = length[j] lowerCAmelCase__ : Union[str, Any] = j # create that string lowerCAmelCase__ : List[Any] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
37
'''simple docstring'''

# NOTE(review): this chunk is machine-mangled — locals are write-only
# (`lowerCAmelCase__`) while the names later read (`model`, `output`,
# `__UpperCAmelCase`) are never bound, and `Union` in the return annotations
# is never imported.  The code is left byte-identical; comments record the
# evident intent for the maintainer.
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
    '''Slow integration checks comparing XLM-RoBERTa hidden states against
    reference values apparently computed with the fairseq checkpoints
    (see the commented ``torch.hub.load`` recipes below).'''

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Intent: load the base checkpoint, run a fixed tokenized sentence,
        # and compare shape + a slice of the last hidden state.
        lowerCAmelCase__ : Tuple = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : str = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Dict = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            # NOTE(review): `model` is never assigned above — presumably the
            # loaded checkpoint; confirm against the un-mangled original.
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,__UpperCAmelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )

    @slow
    def UpperCAmelCase_ ( self ) -> int:
        # Same check as above for the large (1024-dim) checkpoint.
        lowerCAmelCase__ : Union[str, Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        lowerCAmelCase__ : Optional[Any] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
        # The dog is cute and lives in the garden house
        lowerCAmelCase__ : Dict = torch.Size((1, 12, 1024) )  # batch_size, sequence_length, embedding_vector_dim
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            # NOTE(review): `model` never bound here either — see note above.
            lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,__UpperCAmelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,__UpperCAmelCase ,atol=1E-3 ) )
37
1
'''simple docstring'''

# NOTE(review): this chunk (evidently a PhoBERT-style BPE tokenizer) is
# machine-mangled and non-functional as written:
#   * all four module constants were renamed to the same `_lowerCAmelCase`,
#     so each assignment overwrites the previous one, while the class body
#     reads `VOCAB_FILES_NAMES` / `PRETRAINED_VOCAB_FILES_MAP` /
#     `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `logger`, none of which
#     are defined here;
#   * every method is named `UpperCAmelCase_`, so only the last definition
#     survives on the class;
#   * parameter lists repeat `__UpperCAmelCase`, which is a SyntaxError;
#   * assignment targets were renamed to `lowerCAmelCase__`, so attributes
#     the code reads (`self.encoder`, `self.bpe_ranks`, `self.cache`,
#     `self.vocab_file`, `self.merges_file`) are never set.
# Code is left byte-identical; comments record the evident intent.
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {
    '''vocab_file''': '''vocab.txt''',
    '''merges_file''': '''bpe.codes''',
}

_lowerCAmelCase = {
    '''vocab_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
    },
    '''merges_file''': {
        '''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
        '''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
    },
}

_lowerCAmelCase = {
    '''vinai/phobert-base''': 256,
    '''vinai/phobert-large''': 256,
}


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """Return the set of adjacent symbol pairs in a word (BPE helper).

    NOTE(review): `word`, `pairs` and `prev_char` are read but never bound —
    their assignments were mangled to `lowerCAmelCase__` — so this raises
    NameError as written.
    """
    lowerCAmelCase__ : str = set()
    lowerCAmelCase__ : Optional[Any] = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowerCAmelCase__ : Dict = char
    lowerCAmelCase__ : Any = set(UpperCamelCase )
    return pairs


class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''BPE tokenizer with RoBERTa-style special tokens (<s>, </s>, <mask>, ...).

    Method-by-method intent (all mangled to `UpperCAmelCase_`): build inputs
    with special tokens, special-tokens mask, token-type ids, vocab size,
    get_vocab, bpe merge loop, tokenize, token<->id conversion, detokenize,
    save vocabulary, and add_from_file (fairseq-style "<token> <count>"
    dictionary loader).
    '''
    __lowercase : Union[str, Any] = VOCAB_FILES_NAMES
    __lowercase : int = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<mask>" ,**__UpperCAmelCase ,) -> List[str]:
        # Intent: store vocab/merges paths, seed the encoder with the four
        # special-token ids (0..3), load the vocab file, build the decoder
        # as the inverse mapping, parse merges into bpe_ranks, init cache.
        super().__init__(
            bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,**__UpperCAmelCase ,)
        lowerCAmelCase__ : List[Any] = vocab_file
        lowerCAmelCase__ : Dict = merges_file
        lowerCAmelCase__ : int = {}
        lowerCAmelCase__ : str = 0
        lowerCAmelCase__ : Optional[Any] = 1
        lowerCAmelCase__ : Union[str, Any] = 2
        lowerCAmelCase__ : Optional[int] = 3
        self.add_from_file(__UpperCAmelCase )
        lowerCAmelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
        with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle:
            lowerCAmelCase__ : int = merges_handle.read().split("""\n""" )[:-1]
        lowerCAmelCase__ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
        lowerCAmelCase__ : Dict = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
        lowerCAmelCase__ : Dict = {}

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # Intent: <s> A </s> for one sequence; <s> A </s></s> B </s> for pairs.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
        lowerCAmelCase__ : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
        # Intent: 1 where a special token sits, 0 elsewhere.
        # NOTE(review): the duplicated `token_ids_a=` keyword below is a
        # SyntaxError introduced by the mangling.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCAmelCase )) + [1]
        return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
        # Intent: RoBERTa-style token-type ids — all zeros.
        lowerCAmelCase__ : Optional[Any] = [self.sep_token_id]
        lowerCAmelCase__ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def UpperCAmelCase_ ( self ) -> Tuple:
        # vocab_size
        return len(self.encoder )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        # get_vocab: base vocab plus added tokens.
        return dict(self.encoder ,**self.added_tokens_encoder )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
        # bpe(token): iteratively merge the lowest-ranked adjacent pair until
        # no known merge remains; result joined with "@@ " continuation marks.
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase__ : List[Any] = tuple(__UpperCAmelCase )
        lowerCAmelCase__ : List[str] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
        lowerCAmelCase__ : Optional[int] = get_pairs(__UpperCAmelCase )
        if not pairs:
            return token
        while True:
            lowerCAmelCase__ : Tuple = min(__UpperCAmelCase ,key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = bigram
            lowerCAmelCase__ : List[str] = []
            lowerCAmelCase__ : List[str] = 0
            while i < len(__UpperCAmelCase ):
                try:
                    lowerCAmelCase__ : Any = word.index(__UpperCAmelCase ,__UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase__ : Dict = j
                if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase__ : str = tuple(__UpperCAmelCase )
            lowerCAmelCase__ : int = new_word
            if len(__UpperCAmelCase ) == 1:
                break
            else:
                lowerCAmelCase__ : Any = get_pairs(__UpperCAmelCase )
        lowerCAmelCase__ : List[str] = """@@ """.join(__UpperCAmelCase )
        lowerCAmelCase__ : Union[str, Any] = word[:-4]
        lowerCAmelCase__ : Optional[Any] = word
        return word

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[int]:
        # _tokenize: whitespace-split, then BPE each token.
        lowerCAmelCase__ : Dict = []
        lowerCAmelCase__ : str = re.findall(R"""\S+\n?""" ,__UpperCAmelCase )
        for token in words:
            split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(""" """ ) ) )
        return split_tokens

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
        # _convert_token_to_id, falling back to the <unk> id.
        return self.encoder.get(__UpperCAmelCase ,self.encoder.get(self.unk_token ) )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
        # _convert_id_to_token, falling back to <unk>.
        return self.decoder.get(__UpperCAmelCase ,self.unk_token )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[str]:
        # convert_tokens_to_string: undo the "@@ " continuation markers.
        lowerCAmelCase__ : List[str] = """ """.join(__UpperCAmelCase ).replace("""@@ """ ,"""""" ).strip()
        return out_string

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
        # save_vocabulary: copy vocab + merges files into save_directory.
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ : List[str] = os.path.join(
            __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCAmelCase__ : List[str] = os.path.join(
            __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
            copyfile(self.vocab_file ,__UpperCAmelCase )
        if os.path.abspath(self.merges_file ) != os.path.abspath(__UpperCAmelCase ):
            copyfile(self.merges_file ,__UpperCAmelCase )
        return out_vocab_file, out_merge_file

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
        # add_from_file: accept a path (recursing with the opened handle) or a
        # file object of "<token> <count>" lines; intent is to append each
        # token to self.encoder with the next id.
        if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
            try:
                with open(__UpperCAmelCase ,"""r""" ,encoding="""utf-8""" ) as fd:
                    self.add_from_file(__UpperCAmelCase )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
            return
        lowerCAmelCase__ : Tuple = f.readlines()
        for lineTmp in lines:
            lowerCAmelCase__ : Optional[int] = lineTmp.strip()
            lowerCAmelCase__ : List[str] = line.rfind(""" """ )
            if idx == -1:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
            # NOTE(review): the two assignments below were presumably
            # `word = line[:idx]` and `self.encoder[word] = len(self.encoder)`
            # before mangling — as written the vocabulary is never populated.
            lowerCAmelCase__ : Optional[Any] = line[:idx]
            lowerCAmelCase__ : int = len(self.encoder )
37
'''simple docstring'''

# NOTE(review): Flax -> PyTorch checkpoint conversion utilities, machine-
# mangled: both functions share the name `_SCREAMING_SNAKE_CASE` (the second
# shadows the first), both declare the parameter name `UpperCamelCase` twice
# (a SyntaxError), and bodies read names that are never bound (`model_file`,
# `pt_model`, `flax_state_dict`, `flax_key`, ...).  Code left byte-identical;
# comments record the evident intent.
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


_lowerCAmelCase = logging.get_logger(__name__)


def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
    """Intent: load a serialized Flax checkpoint file and copy its weights
    into a PyTorch model via the weight-loading helper below.

    Raises a git-lfs hint when the file is an un-pulled LFS pointer, and
    EnvironmentError when the bytes cannot be deserialized at all.
    """
    try:
        with open(UpperCamelCase , """rb""" ) as flax_state_f:
            lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase , flax_state_f.read() )
    except UnpicklingError as e:
        try:
            with open(UpperCamelCase ) as f:
                # Text file starting with "version" is a git-lfs pointer,
                # i.e. the real weights were never downloaded.
                if f.read().startswith("""version""" ):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""" )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )

    # NOTE(review): `load_flax_weights_in_pytorch_model` is undefined here —
    # presumably the original name of the second function below.
    return load_flax_weights_in_pytorch_model(UpperCamelCase , UpperCamelCase )


def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
    """Intent: copy a (nested-dict) Flax parameter tree into a PyTorch
    model's state dict, transposing kernels, renaming `kernel`/`scale` to
    `weight`, and logging unexpected / missing keys."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""" )
        raise

    # check if we have bf16 weights
    lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase : x.dtype == jnp.bfloataa , UpperCamelCase ) ).values()
    if any(UpperCamelCase ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""" )
        lowerCAmelCase__ : Dict = jax.tree_util.tree_map(
            lambda UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase )

    lowerCAmelCase__ : Any = """"""
    lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase , sep=""".""" )
    lowerCAmelCase__ : Optional[int] = pt_model.state_dict()

    # keep track of unexpected & missing keys
    lowerCAmelCase__ : Optional[Any] = []
    lowerCAmelCase__ : int = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""" )
        # Conv kernels are HWIO in Flax, OIHW in PyTorch -> transpose (3,2,0,1);
        # dense kernels are transposed; layer-norm `scale` maps to `weight`.
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
            lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""]
            lowerCAmelCase__ : Any = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""]
        # Rename numbered-suffix modules `_N` to PyTorch's `.N` indexing,
        # except inside time_embedding.
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(UpperCamelCase ):
                lowerCAmelCase__ : List[str] = (
                    flax_key_tuple_string.replace("""_0""" , """.0""" )
                    .replace("""_1""" , """.1""" )
                    .replace("""_2""" , """.2""" )
                    .replace("""_3""" , """.3""" )
                    .replace("""_4""" , """.4""" )
                    .replace("""_5""" , """.5""" )
                    .replace("""_6""" , """.6""" )
                    .replace("""_7""" , """.7""" )
                    .replace("""_8""" , """.8""" )
                    .replace("""_9""" , """.9""" )
                )
        lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                lowerCAmelCase__ : int = np.asarray(UpperCamelCase ) if not isinstance(UpperCamelCase , np.ndarray ) else flax_tensor
                lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase )
                # remove from missing keys
                missing_keys.remove(UpperCamelCase )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(UpperCamelCase )

    pt_model.load_state_dict(UpperCamelCase )

    # re-transform missing_keys to list
    lowerCAmelCase__ : Optional[int] = list(UpperCamelCase )

    if len(UpperCamelCase ) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""" )
    if len(UpperCamelCase ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference.""" )
    return pt_model
37
1
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

# Fix: the mangling bound the TypeVar to `_lowerCAmelCase` while the class
# base still read `T` (NameError at class creation).  Both names are kept so
# either spelling resolves.
T = TypeVar("T")
_lowerCAmelCase = T


class lowerCAmelCase_( Generic[T] ):
    '''Adjacency-list graph over hashable vertex labels.

    Supports directed and undirected modes.  Edges are added with
    ``UpperCAmelCase_`` (the surviving, mangled name of ``add_edge``), which
    creates missing vertices on the fly and returns ``self`` so calls chain.
    '''

    def __init__( self ,directed = True ) -> None:
        # Fix: the original assigned both attributes to the throwaway name
        # `lowerCAmelCase__`, so `self.adj_list` / `self.directed` — read by
        # every other method — were never set, and the body referenced the
        # parameter as `directed` while it was declared `__UpperCAmelCase`.
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def UpperCAmelCase_ ( self ,source_vertex ,destination_vertex ) -> GraphAdjacencyList[T]:
        """Connect source_vertex -> destination_vertex (both directions when
        the graph is undirected), creating missing vertices; returns self.

        Fix: the original declared both parameters as `__UpperCAmelCase`
        (a SyntaxError) while the body read `source_vertex` /
        `destination_vertex`, and every `self.adj_list[...] = [...]`
        assignment target had been mangled away.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in
            # the adjacency list, add destination vertex to source vertex list
            # of adjacent vertices and add source vertex to destination vertex
            # list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add
            # destination vertex to source vertex list of adjacent vertices,
            # then create a new vertex with destination vertex as key and
            # assign a list containing the source vertex as it's first
            # adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add
            # source vertex to destination vertex list of adjacent vertices,
            # then create a new vertex with source vertex as key.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in
            # adjacency list, create both vertices, each listing the other as
            # its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in
            # adjacency list, add destination vertex to source vertex list of
            # adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add
            # destination vertex to source vertex list of adjacent vertices
            # and create a new vertex with destination vertex as key, which
            # has no adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create
            # a new vertex with source vertex as key and assign a list
            # containing destination vertex as first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither is present, create both: source lists destination as
            # its first adjacent vertex; destination starts empty.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__( self ) -> str:
        return pformat(self.adj_list )
37
'''simple docstring'''

# NOTE(review): MaskFormer model-tester harness, machine-mangled: parameter
# lists repeat `__UpperCAmelCase` (a SyntaxError), every method is named
# `UpperCAmelCase_` (later defs shadow earlier ones), and assignment targets
# were renamed so the names read later (parent, batch_size, model, output,
# result, config, ...) are never bound.  Code left byte-identical; comments
# record the evident intent.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

if is_vision_available():
    from PIL import Image


class lowerCAmelCase_:
    '''Helper that builds small MaskFormer configs/inputs and runs shape and
    loss checks for the unittest class that follows.'''

    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str:
        # Intent: stash parent test case plus the small model hyperparameters
        # (batch size, query count, channels, min/max image size, label
        # count, mask feature size).
        lowerCAmelCase__ : Optional[int] = parent
        lowerCAmelCase__ : Optional[int] = batch_size
        lowerCAmelCase__ : Optional[int] = is_training
        lowerCAmelCase__ : Dict = use_auxiliary_loss
        lowerCAmelCase__ : Union[str, Any] = num_queries
        lowerCAmelCase__ : str = num_channels
        lowerCAmelCase__ : List[str] = min_size
        lowerCAmelCase__ : int = max_size
        lowerCAmelCase__ : Optional[Any] = num_labels
        lowerCAmelCase__ : List[Any] = mask_feature_size

    def UpperCAmelCase_ ( self ) -> Tuple:
        # prepare_config_and_inputs: random pixel values, an all-ones pixel
        # mask, random binary mask labels and random class labels.
        lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __UpperCAmelCase )
        lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase )
        lowerCAmelCase__ : Any = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5
        ).float()
        lowerCAmelCase__ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long()
        lowerCAmelCase__ : Any = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def UpperCAmelCase_ ( self ) -> Dict:
        # get_config: tiny Swin backbone + tiny DETR decoder.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
                decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # prepare_config_and_inputs_for_common: config + minimal inputs dict.
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
        # check_output_hidden_state: hidden-state list lengths must match the
        # backbone depth / decoder layer counts.
        lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states
        lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]:
        # create_and_check_maskformer_model: forward pass in eval mode,
        # then shape checks on the decoder output.
        with torch.no_grad():
            lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
            lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states
        # ensures the correctness of the encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
        # create_and_check_maskformer_instance_segmentation_head_model:
        # run with and without labels; with labels a scalar loss is expected.
        lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()

        def comm_check_on_output(__UpperCAmelCase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
            lowerCAmelCase__ : Dict = model(__UpperCAmelCase )
        comm_check_on_output(__UpperCAmelCase )
        lowerCAmelCase__ : Optional[int] = model(
            pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
        comm_check_on_output(__UpperCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )


@require_torch
# NOTE(review): chunk boundary — the class statement below continues on the
# next source line ("lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , ...").
class
lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Dict = False __lowercase : Tuple = False __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : str = MaskFormerModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase_ ( self ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass 
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2 lowerCAmelCase__ : Any = { """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(), } lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = 
model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase_ ( self ) -> int: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Dict = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Tuple = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase__ : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase = 1e-4 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = torch.tensor( 
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : List[str] = prepare_img() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : Optional[int] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Tuple = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Union[str, Any] = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, 
-7.76_30E00, -5.12_63E00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Tuple = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) 
-> Optional[Any]: lowerCAmelCase__ : str = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase__ : Any = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
37
1
'''simple docstring''' from __future__ import annotations import math def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" if depth < 0: raise ValueError("""Depth cannot be less than 0""" ) if not scores: raise ValueError("""Scores cannot be empty""" ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , ) ) def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Tuple = [90, 23, 6, 33, 21, 65, 123, 34423] lowerCAmelCase__ : Optional[int] = math.log(len(UpperCamelCase ) , 2 ) print(f"""Optimal value : {minimax(0 , 0 , UpperCamelCase , UpperCamelCase , UpperCamelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod() main()
37
"""FocalNet model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Map of checkpoint name -> hosted config URL.
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class lowerCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a FocalNet model / backbone.

    Stores every hyper-parameter of the architecture and exposes the
    backbone stage bookkeeping (``stage_names``, ``_out_features``,
    ``_out_indices``) required by :class:`BackboneConfigMixin`.

    Args:
        image_size: Input image resolution.
        patch_size: Patch (stem) stride.
        num_channels: Number of input image channels.
        embed_dim: Dimensionality of the patch embedding.
        use_conv_embed: Whether to use a convolutional patch embedding.
        hidden_sizes: Hidden size of each stage.
        depths: Number of layers in each stage.
        focal_levels: Number of focal levels per stage.
        focal_windows: Focal window size per stage.
        hidden_act: Activation function name.
        mlp_ratio: Expansion ratio of the MLP blocks.
        hidden_dropout_prob: Dropout probability for hidden states.
        drop_path_rate: Stochastic depth rate.
        use_layerscale: Whether to scale layer outputs by a learnable value.
        layerscale_value: Initial layer-scale value.
        use_post_layernorm: Whether to apply layernorm after the blocks.
        use_post_layernorm_in_modulation: Layernorm inside the modulation.
        normalize_modulator: Whether to normalize the modulator.
        initializer_range: Stddev for weight initialization.
        layer_norm_eps: Epsilon used by the layer-norm layers.
        encoder_stride: Stride used to upscale in masked-image modeling.
        out_features: Stage names to expose when used as a backbone.
        out_indices: Stage indices to expose when used as a backbone.
        **kwargs: Forwarded to :class:`PretrainedConfig`.
    """

    # Identifier used by the auto-config machinery (PretrainedConfig
    # convention) — presumably "focalnet"; kept from the original literal.
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],  # noqa: B006 — kept for interface compatibility; treated as read-only
        depths=[2, 2, 6, 2],  # noqa: B006
        focal_levels=[2, 2, 2, 2],  # noqa: B006
        focal_windows=[3, 3, 3, 3],  # noqa: B006
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Store every hyper-parameter on the instance; the original
        # (obfuscated) version assigned them to throwaway locals, so
        # self.depths / self.stage_names below raised AttributeError.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # One name per stage: the stem plus "stage1".."stageN".
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        # Reconcile the user-supplied out_features / out_indices with the
        # stage names (BackboneConfigMixin contract).
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
37
1
"""Consistency checks for package ``__init__.py`` files.

Verifies that every init's ``_import_structure`` dictionary and its
``TYPE_CHECKING`` section declare exactly the same objects, and that every
submodule is registered in the main init.
"""
import collections
import os
import re
from pathlib import Path


# Root of the package whose inits are checked.
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available().  NOTE: the trailing () is an (empty) second
# group, so findall() yields tuples and callers take element [0].
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend key declared on ``line``, or None.

    A line like ``if not is_torch_available():`` yields ``"torch"``;
    multiple backends on one line are sorted and joined with ``_and_``.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse one ``__init__.py``.

    Returns a pair of dicts ``(import_dict_objects, type_hint_objects)``,
    each mapping a backend key (``"none"`` for backend-free objects) to the
    list of object names it declares — or None when the file is a
    traditional init without an ``_import_structure``.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            # obj[1:-1] strips the surrounding quotes.
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings.

    An empty list means both halves declare the same backends and, per
    backend, the same (duplicate-free) set of objects.
    """

    def find_duplicates(seq):
        # One-line helper: names appearing more than once in ``seq``.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"""{key} backend"""
            errors.append(f"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors


def check_all_inits():
    """Walk the package tree and raise ValueError listing every inconsistent init."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file name.
                    errors[0] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the dotted names of every submodule under PATH_TO_TRANSFORMERS."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; packages are handled above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


# Submodules that are deliberately absent from the main _import_structure.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Raise ValueError if any submodule is missing from the main init's _import_structure."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"""- {module}""" for module in module_not_registered)
        raise ValueError(
            """The following submodules are not properly registed in the main init of Transformers:\n"""
            f"""{list_of_modules}\n"""
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."""
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
37
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''), ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''), ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''), ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''), ('''input_blocks.0.0.weight''', '''conv_in.weight'''), ('''input_blocks.0.0.bias''', '''conv_in.bias'''), ('''out.0.weight''', '''conv_norm_out.weight'''), ('''out.0.bias''', '''conv_norm_out.bias'''), ('''out.2.weight''', '''conv_out.weight'''), ('''out.2.bias''', '''conv_out.bias'''), ] _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''in_layers.0''', '''norm1'''), ('''in_layers.2''', '''conv1'''), ('''out_layers.0''', '''norm2'''), ('''out_layers.3''', '''conv2'''), ('''emb_layers.1''', '''time_emb_proj'''), ('''skip_connection''', '''conv_shortcut'''), ] _lowerCAmelCase = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. 
for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks _lowerCAmelCase = F"""down_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 _lowerCAmelCase = F"""down_blocks.{i}.attentions.{j}.""" _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks _lowerCAmelCase = F"""up_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 _lowerCAmelCase = F"""up_blocks.{i}.attentions.{j}.""" _lowerCAmelCase = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.conv.""" _lowerCAmelCase = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0.""" _lowerCAmelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) _lowerCAmelCase = '''mid_block.attentions.0.''' _lowerCAmelCase = '''middle_block.1.''' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): _lowerCAmelCase = F"""mid_block.resnets.{j}.""" _lowerCAmelCase = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Any = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: 
lowerCAmelCase__ : Optional[int] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowerCAmelCase__ : Any = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowerCAmelCase__ : List[Any] = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = v lowerCAmelCase__ : Tuple = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''nin_shortcut''', '''conv_shortcut'''), ('''norm_out''', '''conv_norm_out'''), ('''mid.attn_1.''', '''mid_block.attentions.0.'''), ] for i in range(4): # down_blocks have two resnets for j in range(2): _lowerCAmelCase = F"""encoder.down_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.""" _lowerCAmelCase = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0.""" _lowerCAmelCase = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): _lowerCAmelCase = F"""decoder.up_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): _lowerCAmelCase = F"""mid_block.resnets.{i}.""" _lowerCAmelCase = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''norm.''', '''group_norm.'''), 
('''q.''', '''query.'''), ('''k.''', '''key.'''), ('''v.''', '''value.'''), ('''proj_out.''', '''proj_attn.'''), ] def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return w.reshape(*w.shape , 1 , 1 ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowerCAmelCase__ : str = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowerCAmelCase__ : Dict = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = v lowerCAmelCase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()} lowerCAmelCase__ : Tuple = ["""q""", """k""", """v""", """proj_out"""] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) lowerCAmelCase__ : Optional[int] = reshape_weight_for_sd(UpperCamelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''resblocks.''', '''text_model.encoder.layers.'''), ('''ln_1''', '''layer_norm1'''), ('''ln_2''', '''layer_norm2'''), ('''.c_fc.''', '''.fc1.'''), ('''.c_proj.''', '''.fc2.'''), ('''.attn''', '''.self_attn'''), ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''), ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''), ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''), ] _lowerCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} _lowerCAmelCase = re.compile('''|'''.join(protected.keys())) # Ordering is from 
# https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
# Maps the single q/k/v marker character to its slot in the fused in_proj tensor.
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert an HF CLIP text-encoder state dict to the OpenCLIP (SD v2) layout.

    SD v2 stores attention q/k/v as one fused ``in_proj`` tensor per layer, so
    the separate ``*_proj`` weights/biases are captured per layer and
    concatenated. All other keys are renamed via the module-level
    ``textenc_pattern`` / ``protected`` tables.

    Args:
        text_enc_dict: mapping of HF parameter name -> tensor.
    Returns:
        dict mapping SD v2 parameter name -> tensor.
    Raises:
        Exception: if any layer is missing one of its q/k/v tensors.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # k[-13] is the single character 'q', 'k' or 'v'
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        # plain key: rename via the protected-pattern table
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """SD v1 text encoders need no key renaming; return the dict unchanged."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )
    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
37
1
'''Integration tests for the PyTorch translation example (`run_translation.py`).

NOTE(review): identifier obfuscation has damaged this module — every test
method is named `UpperCAmelCase_` (so later defs shadow earlier ones and only
the last survives), method bodies call `self.run_seqaseq_quick` /
`self.run_trainer` which no def provides, and `lowerCAmelCase__ : T = ...`
assignments bind a throwaway name while later statements read the intended
names (`logs`, `eval_metrics`, `args`, `output_dir`, ...). The code cannot run
as written; comments below describe the evident intent.
'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed

# NOTE(review): `bindir` is read below but this assignment targets `_lowerCAmelCase`.
_lowerCAmelCase = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
    from run_translation import main  # noqa

set_seed(42)
# model checkpoints used by the tests
_lowerCAmelCase = '''sshleifer/student_marian_en_ro_6_1'''
_lowerCAmelCase = '''sshleifer/tiny-mbart'''


@require_torch
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''End-to-end seq2seq trainer tests driven through `run_translation.py`.'''

    # Helper (call sites name it `run_seqaseq_quick`): run one tiny training
    # pass and sanity-check the logged eval metrics.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,) -> Tuple:
        lowerCAmelCase__ : Tuple = self.run_trainer(
            eval_steps=1 ,max_len=12 ,model_name=__UpperCAmelCase ,num_train_epochs=1 ,distributed=__UpperCAmelCase ,extra_args_str=__UpperCAmelCase ,predict_with_generate=__UpperCAmelCase ,do_train=__UpperCAmelCase ,do_eval=__UpperCAmelCase ,do_predict=__UpperCAmelCase ,)
        lowerCAmelCase__ : List[Any] = TrainerState.load_from_json(os.path.join(__UpperCAmelCase ,"""trainer_state.json""" ) ).log_history
        if not do_eval:
            return
        lowerCAmelCase__ : List[Any] = [log for log in logs if """eval_loss""" in log.keys()]
        lowerCAmelCase__ : Optional[Any] = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            lowerCAmelCase__ : Optional[int] = eval_metrics[-1]
            assert isinstance(last_step_stats["""eval_bleu"""] ,__UpperCAmelCase )
            assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        self.run_seqaseq_quick()

    @require_torch_multi_gpu
    def UpperCAmelCase_ ( self ) -> List[Any]:
        self.run_seqaseq_quick(distributed=__UpperCAmelCase )

    @require_torch_multi_gpu
    def UpperCAmelCase_ ( self ) -> Tuple:
        self.run_seqaseq_quick(distributed=__UpperCAmelCase )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self ) -> Any:
        self.run_seqaseq_quick(distributed=__UpperCAmelCase ,extra_args_str="""--sharded_ddp simple""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self ) -> Dict:
        self.run_seqaseq_quick(distributed=__UpperCAmelCase ,extra_args_str="""--sharded_ddp simple --fp16""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self ) -> Dict:
        self.run_seqaseq_quick(distributed=__UpperCAmelCase ,extra_args_str="""--sharded_ddp zero_dp_2""" ,predict_with_generate=__UpperCAmelCase )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self ) -> List[Any]:
        self.run_seqaseq_quick(
            distributed=__UpperCAmelCase ,extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" ,predict_with_generate=__UpperCAmelCase )

    @require_apex
    @require_torch_gpu
    def UpperCAmelCase_ ( self ) -> Tuple:
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=__UpperCAmelCase ,extra_args_str="""--fp16 --fp16_backend=apex""" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=__UpperCAmelCase ,extra_args_str="""--fp16 --fp16_backend=apex""" )

    @parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
    @require_torch_multi_gpu
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        lowerCAmelCase__ : Optional[Any] = {
            # test with the default log_level - should be info and thus log info once
            """base""": {"""extra_args_str""": """""", """n_matches""": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
        }
        lowerCAmelCase__ : Any = experiments[experiment_id]
        lowerCAmelCase__ : Optional[Any] = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
        lowerCAmelCase__ : Optional[int] = """Running training"""
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**__UpperCAmelCase ,extra_args_str=data["""extra_args_str"""] )
        # count how many times the info string was logged on stderr
        lowerCAmelCase__ : Union[str, Any] = len(re.findall(__UpperCAmelCase ,cl.err ) )
        self.assertEqual(__UpperCAmelCase ,data["""n_matches"""] )

    @slow
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        lowerCAmelCase__ : Tuple = self.run_trainer(
            eval_steps=2 ,max_len=128 ,model_name=__UpperCAmelCase ,learning_rate=3E-4 ,num_train_epochs=10 ,distributed=__UpperCAmelCase ,)
        # Check metrics
        lowerCAmelCase__ : str = TrainerState.load_from_json(os.path.join(__UpperCAmelCase ,"""trainer_state.json""" ) ).log_history
        lowerCAmelCase__ : Union[str, Any] = [log for log in logs if """eval_loss""" in log.keys()]
        lowerCAmelCase__ : Optional[Any] = eval_metrics[0]
        lowerCAmelCase__ : Tuple = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["""eval_bleu"""] ,__UpperCAmelCase )
        # test if do_predict saves generations and metrics
        lowerCAmelCase__ : Optional[Any] = os.listdir(__UpperCAmelCase )
        lowerCAmelCase__ : Union[str, Any] = {os.path.basename(__UpperCAmelCase ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        from transformers.training_args import OptimizerNames

        # Run one epoch with the given optimizer and return GPU memory deltas + loss.
        def train_and_return_metrics(__UpperCAmelCase ) -> Tuple[int, float]:
            lowerCAmelCase__ : List[Any] = """--skip_memory_metrics 0"""
            lowerCAmelCase__ : Dict = self.run_trainer(
                max_len=128 ,model_name=__UpperCAmelCase ,learning_rate=3E-4 ,num_train_epochs=1 ,optim=__UpperCAmelCase ,distributed=__UpperCAmelCase ,extra_args_str=__UpperCAmelCase ,do_eval=__UpperCAmelCase ,do_predict=__UpperCAmelCase ,n_gpus_to_use=1 ,)
            # Check metrics
            lowerCAmelCase__ : List[str] = TrainerState.load_from_json(Path(__UpperCAmelCase ,"""trainer_state.json""" ) ).log_history
            lowerCAmelCase__ : Tuple = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
            lowerCAmelCase__ : int = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
            lowerCAmelCase__ : Any = logs[0]["""train_loss"""]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )

        lowerCAmelCase__ : Optional[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        lowerCAmelCase__ : List[Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig
        lowerCAmelCase__ : Optional[int] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        lowerCAmelCase__ : Any = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        lowerCAmelCase__ : Any = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            __UpperCAmelCase ,__UpperCAmelCase ,"""should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" ,)
        self.assertGreater(
            __UpperCAmelCase ,__UpperCAmelCase ,"""should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" ,)
        self.assertEqual(
            __UpperCAmelCase ,__UpperCAmelCase ,F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )

    # Helper (call sites name it `run_trainer`): build the CLI arg list and run
    # run_translation.py either in-process (patched sys.argv) or distributed.
    # NOTE(review): the literal `78,306` in --max_source_length/--max_target_length/
    # --val_max_target_length looks like a mangled `{max_len}` placeholder — TODO confirm.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = 3E-3 ,__UpperCAmelCase = "adafactor" ,__UpperCAmelCase = False ,__UpperCAmelCase = None ,__UpperCAmelCase = 0 ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,) -> List[str]:
        lowerCAmelCase__ : List[str] = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
        lowerCAmelCase__ : List[str] = self.get_auto_remove_tmp_dir()
        lowerCAmelCase__ : Optional[int] = F"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length 78,306
            --max_target_length 78,306
            --do_train
            --num_train_epochs {str(__UpperCAmelCase )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(__UpperCAmelCase )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        lowerCAmelCase__ : Union[str, Any] = F"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length 78,306
            --evaluation_strategy steps
            --eval_steps {str(__UpperCAmelCase )}
        """.split()
        lowerCAmelCase__ : Optional[int] = """
            --do_predict
        """.split()
        lowerCAmelCase__ : Dict = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F"""--optim {optim}""".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                lowerCAmelCase__ : Dict = get_gpu_count()
            lowerCAmelCase__ : Optional[Any] = get_torch_dist_unique_port()
            lowerCAmelCase__ : Optional[Any] = F"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            lowerCAmelCase__ : List[str] = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(__UpperCAmelCase ,env=self.get_env() )
        else:
            lowerCAmelCase__ : List[Any] = ["""run_translation.py"""] + args
            with patch.object(__UpperCAmelCase ,"""argv""" ,__UpperCAmelCase ):
                main()
        return output_dir
37
'''BLEURT metric for the `datasets` library, wrapping google-research/bleurt.

NOTE(review): obfuscation damage — the module-level `_lowerCAmelCase`
assignments below shadow one another, while the decorator reads `_DESCRIPTION`
/ `_KWARGS_DESCRIPTION`, `_download_and_prepare` reads `CHECKPOINT_URLS` and
`checkpoint_name`, and all three framework hook methods (`_info`,
`_download_and_prepare`, `_compute` by their call conventions) are named
`UpperCAmelCase_`, so later defs shadow earlier ones. The module cannot run as
written; TODO restore the original names.
'''
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets

_lowerCAmelCase = datasets.logging.get_logger(__name__)

# BibTeX citation for the metric card.
_lowerCAmelCase = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

# Human-readable description shown on the metric card.
_lowerCAmelCase = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

# Usage / arguments documentation for `compute`.
_lowerCAmelCase = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

# Downloadable checkpoint name -> URL (read below as `CHECKPOINT_URLS`).
_lowerCAmelCase = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
    '''`datasets.Metric` implementation that scores candidates with a BLEURT checkpoint.'''

    # Metric metadata: string predictions/references, links to paper and repo.
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/google-research/bleurt""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" ,id="""sequence""" ),
                    """references""": datasets.Value("""string""" ,id="""sequence""" ),
                } ) ,codebase_urls=["""https://github.com/google-research/bleurt"""] ,reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] ,)

    # Resolve the configured checkpoint name, download it, build the scorer.
    # NOTE(review): the default-checkpoint assignment targets a throwaway name,
    # so `checkpoint_name` used below is never set on the "default" path.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            lowerCAmelCase__ : str = """bleurt-base-128"""
        if self.config_name.lower() in CHECKPOINT_URLS:
            lowerCAmelCase__ : Union[str, Any] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            lowerCAmelCase__ : List[Any] = self.config_name.upper()
        else:
            raise KeyError(
                F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        lowerCAmelCase__ : int = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        lowerCAmelCase__ : Dict = score.BleurtScorer(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) )

    # Score candidates against references with the loaded BLEURT scorer.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
        lowerCAmelCase__ : Union[str, Any] = self.scorer.score(references=__UpperCAmelCase ,candidates=__UpperCAmelCase )
        return {"scores": scores}
37
1
'''simple docstring''' import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( '''The `image_to_image.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionImg2ImgPipeline` instead.''' )
37
'''Conversational pipeline: a Conversation container class plus the pipeline that runs it.

NOTE(review): obfuscation damage — both classes are named `lowerCAmelCase_`
(the second shadows the first), every non-dunder method is named
`UpperCAmelCase_` (later defs shadow earlier ones), and `lowerCAmelCase__ : T`
assignments bind a throwaway name while later code reads the intended
attributes (`self.uuid`, `self.past_user_inputs`, ...). Call sites inside this
file reference the real method names (`add_user_input` in the error text,
`mark_processed`, `append_response`, `iter_texts`); TODO restore them.
'''
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch

_lowerCAmelCase = logging.get_logger(__name__)


class lowerCAmelCase_:
    '''Holds one chat: past user inputs, generated responses, and the pending user input.'''

    # NOTE(review): `uuid.uuida()` is not an attribute of the `uuid` module —
    # presumably `uuid.uuid4()`; confirm.
    def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str:
        if not conversation_id:
            lowerCAmelCase__ : List[str] = uuid.uuida()
        if past_user_inputs is None:
            lowerCAmelCase__ : List[Any] = []
        if generated_responses is None:
            lowerCAmelCase__ : str = []
        lowerCAmelCase__ : uuid.UUID = conversation_id
        lowerCAmelCase__ : List[str] = past_user_inputs
        lowerCAmelCase__ : List[str] = generated_responses
        lowerCAmelCase__ : Optional[str] = text

    # Equal when UUIDs match, or when pending input + full history all match.
    def __eq__( self ,__UpperCAmelCase ) -> Dict:
        if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    # `add_user_input`: stage a new user utterance; refuse (or overwrite) if one is pending.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]:
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                lowerCAmelCase__ : Optional[int] = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            lowerCAmelCase__ : Optional[Any] = text

    # `mark_processed`: move the pending input into history and clear it.
    def UpperCAmelCase_ ( self ) -> List[Any]:
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        lowerCAmelCase__ : Union[str, Any] = None

    # `append_response`: record a model response.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
        self.generated_responses.append(__UpperCAmelCase )

    # `iter_texts`: yield (is_user, text) pairs in chronological order,
    # ending with the pending user input if any.
    def UpperCAmelCase_ ( self ) -> List[str]:
        for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ) -> Tuple:
        lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            lowerCAmelCase__ : Any = """user""" if is_user else """bot"""
            output += F"""{name} >> {text} \n"""
        return output


@add_end_docstrings(
    SCREAMING_SNAKE_CASE_ ,
    R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''' ,
)
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''Pipeline that feeds Conversation objects to a generative model.'''

    # Fall back to EOS as the pad token when the tokenizer defines none.
    def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
        super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
        if self.tokenizer.pad_token_id is None:
            lowerCAmelCase__ : Tuple = self.tokenizer.eos_token

    # `_sanitize_parameters`: split kwargs into preprocess/forward/postprocess dicts.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]:
        lowerCAmelCase__ : List[Any] = {}
        lowerCAmelCase__ : Optional[int] = {}
        lowerCAmelCase__ : List[str] = {}
        if min_length_for_response is not None:
            lowerCAmelCase__ : Any = min_length_for_response
        if minimum_tokens is not None:
            lowerCAmelCase__ : Optional[int] = minimum_tokens
        if "max_length" in generate_kwargs:
            lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            lowerCAmelCase__ : int = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(__UpperCAmelCase )
        return preprocess_params, forward_params, postprocess_params

    # Single conversation in -> single conversation out; lists pass through.
    def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]:
        lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase )
        if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
            return outputs[0]
        return outputs

    # `preprocess`: tokenize the conversation into framework tensors.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]:
        if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
            raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                """Add user inputs with the conversation's `add_user_input` method""" )
        if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
            lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase )
        if self.framework == "pt":
            lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            lowerCAmelCase__ : Dict = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    # `_forward`: trim history to leave room for `minimum_tokens`, then generate.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict:
        lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
        lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            lowerCAmelCase__ : str = max_length - minimum_tokens
            lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:]
        lowerCAmelCase__ : str = model_inputs.pop("""conversation""" )
        lowerCAmelCase__ : Union[str, Any] = max_length
        lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase )
        # encoder-decoder outputs start after the decoder start token; decoder-only
        # outputs echo the prompt, so skip the first n tokens
        if self.model.config.is_encoder_decoder:
            lowerCAmelCase__ : int = 1
        else:
            lowerCAmelCase__ : int = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    # `postprocess`: decode the answer and fold it back into the conversation.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]:
        lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""]
        lowerCAmelCase__ : Tuple = self.tokenizer.decode(
            output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
        lowerCAmelCase__ : Union[str, Any] = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(__UpperCAmelCase )
        return conversation

    # `_legacy_parse_and_tokenize`: flat encoding with EOS between turns,
    # truncated from the left to the tokenizer's max length.
    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
        lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id
        lowerCAmelCase__ : int = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) )
        if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
            lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
37
1
"""`accelerate env` command: print environment info for bug reports."""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    """Build the argument parser for `accelerate env`.

    Args:
        subparsers: optional subparsers action to register under; when None a
            standalone parser is created.
    Returns:
        The configured `argparse.ArgumentParser`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        # dispatch target when invoked as a subcommand
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    """Collect and print environment information (versions, accelerators, RAM, config).

    Returns the info dict so callers can inspect it programmatically.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info


def main():
    """CLI entry point: parse args and run the env command."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
37
"""Zero-shot image classification pipeline (CLIP-style image/text scoring)."""

from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

_lowerCAmelCase = logging.get_logger(__name__)


# Base class / decorator argument restored from the otherwise-unused imports
# ``Pipeline`` and ``PIPELINE_INIT_ARGS``; the hook-method names below follow
# the transformers Pipeline contract (_sanitize_parameters/preprocess/_forward/
# postprocess), which the base class calls by name.
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase_(Pipeline):
    """Score an image against a set of free-form candidate text labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """Classify ``images`` against ``candidate_labels`` (passed as kwargs)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Only preprocessing takes parameters; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        # ``UserDict`` restored from the otherwise-unused import: tokenizer
        # outputs are BatchEncoding (a UserDict subclass).
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # BUG FIX: the mangled source renamed the lambda parameter but kept
        # ``x[0]`` in its body, raising NameError when sorting the scores.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
37
1
"""Unit tests for the diffusers ``UNet2DModel`` backbone.

Reconstructed from name-mangled source: locals/properties were restored from
their in-file use sites (e.g. ``floats_tensor((batch_size, num_channels) +
sizes)``, ``self.dummy_input``) and the ModelTesterMixin contract
(``model_class``, ``main_input_name``, ``dummy_input``, ``input_shape``,
``output_shape``, ``prepare_init_args_and_inputs_for_common``).
"""

import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin

logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))


class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
37
"""SageMaker multi-node data-parallel training smoke tests.

Reconstructed from name-mangled source: method names were restored from their
in-file call sites and the unittest/pytest hook contracts (``setUp``,
``test_*``); only runs when ``TEST_SAGEMAKER=True``.
"""

import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class lowerCAmelCase_(unittest.TestCase):
    """One parameterized run per framework/script combination (see decorator)."""

    def setUp(self):
        # unittest hook: stage the example training script into the SageMaker
        # test directory before each run.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,  # a failed copy must abort the test, not continue silently
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for ``instance_count`` training nodes."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings: smdistributed dataparallel unless the
        # script drives torch DDP itself.
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a CSV in the test directory."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
37
1
'''simple docstring''' # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _lowerCAmelCase = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # 
isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _lowerCAmelCase = concatenate_datasets _lowerCAmelCase = DownloadConfig _lowerCAmelCase = DownloadManager _lowerCAmelCase = DownloadMode _lowerCAmelCase = DownloadConfig _lowerCAmelCase = DownloadMode _lowerCAmelCase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
37
"""SentencePiece-based ALBERT tokenizer.

Reconstructed from name-mangled source: module constants were restored from
their in-class references (``VOCAB_FILES_NAMES``, ``SPIECE_UNDERLINE``,
``logger``), and method names follow the ``PreTrainedTokenizer`` contract,
which calls them by name (``_tokenize``, ``_convert_token_to_id``, ...).
"""

import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class lowerCAmelCase_(PreTrainedTokenizer):
    """ALBERT tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # SentencePieceProcessor is not picklable; it is rebuilt in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before encoding."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting digit+comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # Split pieces like "9," so the digits and the comma are
                # tokenized consistently with standalone numbers.
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """``[CLS] A [SEP]`` for a single sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No source file on disk (e.g. loaded from memory): dump the
            # serialized protobuf instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
37
1
"""Bifid cipher built on a 5x5 Polybius square (``j`` folded into ``i``)."""

import numpy as np

# 5x5 Polybius square used for coordinate lookup ("j" shares a cell with "i").
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class lowerCAmelCase_:
    """Encode/decode lowercase messages with Polybius-square fractionation."""

    def __init__(self) -> None:
        # NumPy array so np.where can locate a letter's (row, col).
        # BUG FIX: the mangled source referenced an undefined placeholder
        # here, raising NameError on instantiation.
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based ``[row, column]`` coordinates of *letter* in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based ``(row, column)`` in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode *message* (case-insensitive; spaces dropped, ``j`` -> ``i``)."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Step 1: write each letter's (row, col) into a 2 x N grid.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Step 2: read the grid row-major (all rows, then all columns) and
        # turn consecutive coordinate pairs back into letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode an encoded *message* (inverse of :meth:`encode`)."""
        message = message.lower()
        # BUG FIX: the mangled source discarded the result of replace(),
        # so any space in the input crashed letter_to_numbers().
        message = message.replace(" ", "")

        # Step 1: flatten each letter's (row, col) into a 2N-long sequence.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Step 2: fold the sequence back into a 2 x N grid; column i holds
        # the (row, col) of the i-th plaintext letter.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
37
"""Tokenization classes for Salesforce CTRL (byte-pair encoding with control codes)."""
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# Mapping of CTRL "control code" prefixes to their vocabulary ids.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}

# Backward-compatible alias for the previous (machine-mangled) constant name,
# which pointed at the last assignment in the original file.
_lowerCAmelCase = CONTROL_CODES


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    Args:
        word: a tuple (or sequence) of symbols (variable-length strings).

    Returns:
        A ``set`` of ``(symbol, symbol)`` tuples for every adjacent pair.
    """
    # NOTE(fix): the mangled version assigned the set to a throwaway name and
    # then used an undefined `pairs` variable — restored the intended logic.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Backward-compatible alias for the previous (machine-mangled) function name.
_SCREAMING_SNAKE_CASE = get_pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on byte-pair encoding.

    Args:
        vocab_file (`str`): Path to the vocabulary file (JSON).
        merges_file (`str`): Path to the BPE merges file.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            Token used for out-of-vocabulary symbols.
    """

    # NOTE(fix): these class attributes were all mangled to `__lowercase`
    # (each overwriting the previous); restored the names required by the
    # `PreTrainedTokenizer` API, in the original assignment order.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header; last entry is an empty string.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single whitespace-delimited token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so merges can distinguish word-final symbols.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" marker (4 characters).
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknowns map to `unk_token`."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join sub-word tokens back into a single string, undoing the "@@ " joins."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # NOTE(fix): the sort key lambda's parameter had been mangled so the
            # body referenced an undefined `kv` — restored `lambda kv: kv[1]`.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file


# Backward-compatible alias for the previous (machine-mangled) class name.
lowerCAmelCase_ = CTRLTokenizer
37
1
"""Minimum cost for travel passes (LeetCode 983 / "Minimum Cost For Tickets")."""
import functools


def mincost_tickets(days, costs):
    """Return the minimum total cost to cover every travel day in *days*.

    Args:
        days: list of distinct travel days, each an ``int`` in [1, 365].
        costs: list of exactly three ints — price of a 1-day, 7-day and
            30-day pass, in that order.

    Returns:
        The minimum total cost as an ``int``; 0 when *days* is empty.

    Raises:
        ValueError: if the inputs violate the constraints above.
    """
    # Validate inputs up front so the memoized recursion can assume clean data.
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)  # O(1) membership tests inside the recursion

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost to cover all travel days from `index` to year end.
        if index > 365:
            return 0
        if index not in days_set:
            # Not a travel day: no pass needed, move on.
            return dynamic_programming(index + 1)
        # Travel day: buy whichever pass minimizes the remaining cost.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


# Backward-compatible alias for the previous (machine-mangled) name.
_SCREAMING_SNAKE_CASE = mincost_tickets


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
"""Pangram checks: does a sentence contain every letter of the alphabet?"""


def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if *input_str* contains all 26 ASCII letters (case-insensitive)."""
    frequency = set()
    # Whitespace never counts toward the alphabet.
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check using a fixed 26-slot boolean flag array."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True  # 97 == ord("a")
        elif char.isupper():
            flag[ord(char) - 65] = True  # 65 == ord("A")
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a set comprehension over the lowercased alphabetic chars."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Time the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
"""In-place quicksort with a randomly chosen pivot."""
import random


def partition(a, left_index, right_index):
    """Partition ``a[left_index:right_index]`` around ``a[left_index]``.

    Elements smaller than the pivot end up to its left; returns the pivot's
    final index.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Place the pivot between the smaller and larger partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """Sort ``a[left:right]`` in place using quicksort with a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        # Switch the pivot with the left-most bound so partition() can use it.
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        # Recursive quicksort to the left of the pivot point.
        quick_sort_random(a, left, pivot_index)
        # Recursive quicksort to the right of the pivot point.
        quick_sort_random(a, pivot_index + 1, right)


def main():
    """Read comma-separated numbers from stdin, sort them, print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
37
"""Three implementations of digit-sum, plus a micro-benchmark."""


def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of *n* (sign is ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit-sum (sign is ignored)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit-sum via string conversion (sign is ignored)."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each implementation on progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # One-line summary: timing and result for a single (func, value) pair.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
"""Lazy-import module definition for the GLPN model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# NOTE(fix): every structure assignment had been mangled onto one name
# (`_lowerCAmelCase`), while `_LazyModule` was handed an undefined
# `_import_structure` — restored the standard lazy-import pattern.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
"""Dump information about the Python / deep-learning environment."""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# NOTE(fix): the mangled source assigned the bare string '3' to a throwaway
# name while importing `os` unused — presumably this silenced TensorFlow's
# C++ logging before the `import tensorflow` below; restored that intent.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
37
1
"""Configuration class for RAG (Retrieval-Augmented Generation) models."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings


RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
            [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`)
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`)
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*)
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`)
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved(`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""

# Backward-compatible alias for the previous (machine-mangled) constant name.
_lowerCAmelCase = RAG_CONFIG_DOC


# NOTE(fix): the decorator argument and the base class were both mangled to an
# undefined `SCREAMING_SNAKE_CASE_`; restored the doc constant and
# `PretrainedConfig` base.
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    """Composite configuration holding a question-encoder config and a generator config."""

    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            # Fall back to the generator's forced EOS token when not set explicitly.
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate a [`RagConfig`] from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self) -> dict:
        """Serialize this config (and its nested sub-configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias for the previous (machine-mangled) class name.
lowerCAmelCase_ = RagConfig
37
"""XLM-RoBERTa-XL model configuration (and its ONNX export configuration)."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


# NOTE(fix): the base class had been mangled to an undefined
# `SCREAMING_SNAKE_CASE_`; restored `PretrainedConfig`.
class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration for an XLM-RoBERTa-XL model.

    Defaults correspond to the `facebook/xlm-roberta-xl` checkpoint.
    """

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa-XL."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )


# Backward-compatible alias: the mangled name's last binding was the ONNX config.
lowerCAmelCase_ = XLMRobertaXLOnnxConfig
37
1
"""List-backed priority queues: fixed three-level priorities and element-valued priority."""


class OverFlowError(Exception):
    """Raised when enqueueing onto a full queue."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""


class FixedPriorityQueue:
    """FIFO queue with three fixed priority levels (0 is highest)."""

    def __init__(self) -> None:
        # One FIFO list per priority level 0..2.
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Add *data* at the given *priority* (0, 1 or 2); each level holds 100 items.

        Raises:
            OverflowError: if the chosen level is already full.
            ValueError: if *priority* is not a valid level.
        """
        try:
            if len(self.queues[priority]) >= 100:
                # NOTE(review): the builtin OverflowError is raised here while
                # ElementPriorityQueue raises the custom OverFlowError — kept
                # as-is to preserve the exception types callers may catch.
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Remove and return the oldest element of the highest non-empty priority."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the element's own value is its priority (smallest first)."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        """Add *data*; the queue holds at most 100 items."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest element."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demo of FixedPriorityQueue."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    """Demo of ElementPriorityQueue."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
37
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[str] = analyze_text(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase__ : List[Any] = sum(single_char_strings.values() ) # one length string lowerCAmelCase__ : Optional[int] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase__ : List[Any] = single_char_strings[ch] lowerCAmelCase__ : List[Any] = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase__ : Dict = sum(two_char_strings.values() ) lowerCAmelCase__ : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase__ : Union[str, Any] = cha + cha if sequence in two_char_strings: lowerCAmelCase__ : Dict = two_char_strings[sequence] lowerCAmelCase__ : Tuple = int(UpperCamelCase ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = Counter() # type: ignore lowerCAmelCase__ : Tuple = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. 
two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
37
1
"""Project Euler 34: numbers equal to the sum of the factorials of their digits."""
from math import factorial


# Precomputed factorial of each decimal digit, keyed by the digit character.
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the decimal digits of *n*."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(n))


def solution() -> int:
    """Sum of all numbers (>= 3) equal to the factorial-sum of their digits.

    The upper bound 7 * 9! + 1 works because a number with more than seven
    digits cannot be matched by seven digit-factorials.
    """
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
37
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=0 ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = [] for old_item in old_list: lowerCAmelCase__ : Optional[Any] = old_item.replace("""in_layers.0""" , """norm1""" ) lowerCAmelCase__ : Optional[int] = new_item.replace("""in_layers.2""" , """conv1""" ) lowerCAmelCase__ : Dict = new_item.replace("""out_layers.0""" , """norm2""" ) lowerCAmelCase__ : str = new_item.replace("""out_layers.3""" , """conv2""" ) lowerCAmelCase__ : str = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) lowerCAmelCase__ : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) lowerCAmelCase__ : Union[str, Any] = shave_segments(UpperCamelCase , n_shave_prefix_segments=UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=0 ): """simple docstring""" lowerCAmelCase__ : int = [] for old_item in old_list: lowerCAmelCase__ : List[str] = old_item lowerCAmelCase__ : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) lowerCAmelCase__ : Optional[Any] = new_item.replace("""norm.bias""" , """group_norm.bias""" ) lowerCAmelCase__ : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) lowerCAmelCase__ : int = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) lowerCAmelCase__ : str = shave_segments(UpperCamelCase , n_shave_prefix_segments=UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _SCREAMING_SNAKE_CASE ( 
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): lowerCAmelCase__ : Any = old_checkpoint[path] lowerCAmelCase__ : int = old_tensor.shape[0] // 3 lowerCAmelCase__ : int = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) lowerCAmelCase__ : Tuple = old_tensor.shape[0] // config["""num_head_channels"""] // 3 lowerCAmelCase__ : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = old_tensor.split(channels // num_heads , dim=1 ) lowerCAmelCase__ : int = query.reshape(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = key.reshape(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = value.reshape(UpperCamelCase ) for path in paths: lowerCAmelCase__ : Any = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here lowerCAmelCase__ : Any = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) lowerCAmelCase__ : Any = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) lowerCAmelCase__ : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: lowerCAmelCase__ : Any = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: lowerCAmelCase__ : List[Any] = old_checkpoint[path["""old"""]][:, :, 0] else: lowerCAmelCase__ : Dict = old_checkpoint[path["""old"""]] def 
_SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : str = {} lowerCAmelCase__ : str = checkpoint["""time_embed.0.weight"""] lowerCAmelCase__ : List[Any] = checkpoint["""time_embed.0.bias"""] lowerCAmelCase__ : int = checkpoint["""time_embed.2.weight"""] lowerCAmelCase__ : List[str] = checkpoint["""time_embed.2.bias"""] lowerCAmelCase__ : str = checkpoint["""input_blocks.0.0.weight"""] lowerCAmelCase__ : Any = checkpoint["""input_blocks.0.0.bias"""] lowerCAmelCase__ : Union[str, Any] = checkpoint["""out.0.weight"""] lowerCAmelCase__ : Union[str, Any] = checkpoint["""out.0.bias"""] lowerCAmelCase__ : str = checkpoint["""out.2.weight"""] lowerCAmelCase__ : Tuple = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only lowerCAmelCase__ : Optional[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) lowerCAmelCase__ : Optional[Any] = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(UpperCamelCase ) } # Retrieves the keys for the middle blocks only lowerCAmelCase__ : Union[str, Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) lowerCAmelCase__ : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(UpperCamelCase ) } # Retrieves the keys for the output blocks only lowerCAmelCase__ : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) lowerCAmelCase__ : List[Any] = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(UpperCamelCase ) } for i in range(1 , UpperCamelCase ): lowerCAmelCase__ : Dict = (i - 1) // (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : Tuple = (i - 1) % (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : Optional[int] = [key 
for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] lowerCAmelCase__ : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: lowerCAmelCase__ : Optional[int] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] lowerCAmelCase__ : Tuple = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue lowerCAmelCase__ : Optional[Any] = renew_resnet_paths(UpperCamelCase ) lowerCAmelCase__ : Dict = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} lowerCAmelCase__ : Optional[Any] = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCamelCase ) if len(UpperCamelCase ): lowerCAmelCase__ : Optional[Any] = renew_attention_paths(UpperCamelCase ) lowerCAmelCase__ : Tuple = { """old""": f"""input_blocks.{i}.1""", """new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } lowerCAmelCase__ : List[str] = { f"""input_blocks.{i}.1.qkv.bias""": { """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCamelCase , config=UpperCamelCase , ) lowerCAmelCase__ : Dict = middle_blocks[0] lowerCAmelCase__ : Union[str, Any] = middle_blocks[1] 
lowerCAmelCase__ : Dict = middle_blocks[2] lowerCAmelCase__ : Any = renew_resnet_paths(UpperCamelCase ) assign_to_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase , config=UpperCamelCase ) lowerCAmelCase__ : Dict = renew_resnet_paths(UpperCamelCase ) assign_to_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase , config=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = renew_attention_paths(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , attention_paths_to_split=UpperCamelCase , config=UpperCamelCase ) for i in range(UpperCamelCase ): lowerCAmelCase__ : Tuple = i // (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : List[str] = i % (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : int = [shave_segments(UpperCamelCase , 2 ) for name in output_blocks[i]] lowerCAmelCase__ : Union[str, Any] = {} for layer in output_block_layers: lowerCAmelCase__ , lowerCAmelCase__ : Any = layer.split(""".""" )[0], shave_segments(UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCamelCase ) else: lowerCAmelCase__ : str = [layer_name] if len(UpperCamelCase ) > 1: lowerCAmelCase__ : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] lowerCAmelCase__ : Dict = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] lowerCAmelCase__ : Optional[int] = renew_resnet_paths(UpperCamelCase ) lowerCAmelCase__ : int = renew_resnet_paths(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = {"""old""": 
f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , config=UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): lowerCAmelCase__ : List[Any] = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) lowerCAmelCase__ : int = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] lowerCAmelCase__ : int = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. if len(UpperCamelCase ) == 2: lowerCAmelCase__ : Tuple = [] if len(UpperCamelCase ): lowerCAmelCase__ : Dict = renew_attention_paths(UpperCamelCase ) lowerCAmelCase__ : Tuple = { """old""": f"""output_blocks.{i}.1""", """new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } lowerCAmelCase__ : Tuple = { f"""output_blocks.{i}.1.qkv.bias""": { """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=UpperCamelCase , ) else: lowerCAmelCase__ : int = renew_resnet_paths(UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: lowerCAmelCase__ : Tuple = """.""".join(["""output_blocks""", str(UpperCamelCase ), path["""old"""]] ) 
lowerCAmelCase__ : List[Any] = """.""".join(["""up_blocks""", str(UpperCamelCase ), """resnets""", str(UpperCamelCase ), path["""new"""]] ) lowerCAmelCase__ : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: _lowerCAmelCase = json.loads(f.read()) _lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] _lowerCAmelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: _lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) _lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) _lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
37
1
'''simple docstring'''

# Public API: the obfuscated entry point keeps its name; fill_row is the
# helper the entry point already calls by that exact name.
__all__ = ["_SCREAMING_SNAKE_CASE", "fill_row"]


def _SCREAMING_SNAKE_CASE(grid):
    """Return the minimum path sum from the top-left to the bottom-right of
    ``grid``, moving only right or down.

    The grid is mutated in place: each row is overwritten with running
    minimum path sums.

    Raises:
        TypeError: if ``grid`` (or its first row) is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # First row can only be reached by moving right: prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        # Fix: the helper was defined under a colliding name; the call site
        # always referenced `fill_row`, which is now actually defined below.
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above):
    """Accumulate minimum path sums into ``current_row`` given the already
    accumulated ``row_above``; returns the mutated row."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
'''simple docstring'''
from math import sqrt

# NOTE(review): in the original chunk every function below was defined under
# one shared obfuscated name with duplicate parameter names (a SyntaxError),
# while the bodies and internal call sites still used the real identifiers
# (is_prime, prime_factorization, get_prime_numbers, get_divisors, gcd, ...).
# The definitions are restored to those names so the module imports and runs.
# All runtime assertion messages are preserved byte-for-byte.


def is_prime(number):
    """Trial-division primality test for a non-negative int (0 and 1 are not prime)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: all primes from 2 up to n (requires n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """All primes from 2 up to n, found with is_prime (requires n > 2)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Prime factors of `number` in ascending order (0 and 1 map to [number])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # note: float division; the loop terminates when quotient == 1.0
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Largest prime factor of `number` (number >= 2 in practice)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Smallest prime factor of `number` (number >= 2 in practice)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """True iff `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """True iff `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Return the first pair of primes (ascending search) summing to the even
    number > 2, per Goldbach's conjecture."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Greatest common divisor via the Euclidean algorithm (both args >= 0)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Least common multiple ("kgV") built from the prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """The n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """All primes strictly between the two given primes (exclusive bounds).

    NOTE(review): as in the original, the final precondition indexes ans[0],
    so consecutive primes (empty result) would raise IndexError.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """All positive divisors of n (including 1 and n), requires n >= 1."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """True iff `number` equals the sum of its proper divisors (requires > 1)."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduce numerator/denominator by their gcd; returns the reduced pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Iterative n! for n >= 0 (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """n-th term of the Fibonacci sequence with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
37
1
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers _lowerCAmelCase = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
37
'''simple docstring'''
# Fix: sklearn exposes `f1_score`, not `fa_score`; the alias keeps the
# obfuscated internal call sites (`fa_score(...)`) working unchanged.
from sklearn.metrics import f1_score as fa_score
from sklearn.metrics import matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


# Fix: these three constants were all assigned to one shared name while the
# decorator and MetricInfo below reference _CITATION / _DESCRIPTION /
# _KWARGS_DESCRIPTION — restore the names the use sites demand.
_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of positions where preds equals labels.

    NOTE(review): relies on elementwise `==` plus `.mean()`, i.e. array-like
    inputs (numpy arrays) — confirm callers convert lists beforehand.
    """
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """Accuracy and F1 for preds vs labels.

    Fix: the third parameter is named `fa_avg` because the call site in
    `_compute` passes it as a keyword (`fa_avg="macro"`).
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: per-question macro-F1 (f1_m), answer-level F1 (f1_a)
    and per-question exact match, given id/prediction dicts and gold labels."""
    question_map = {}
    # Group (prediction, label) pairs by their paragraph-question id.
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    em = sum(ems) / len(ems)
    fa_m = float(sum(fas) / len(fas))
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):
    '''simple docstring'''

    # Fix: the three methods below shared one obfuscated name so each def
    # shadowed the previous. They are restored to the names required by the
    # datasets.Metric contract (_info/_compute) and by the literal
    # `self._get_feature_types()` call in _info.

    def _info(self):
        """Validate the config name and describe the metric's inputs."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature schema per subset: record/multirc use nested idx dicts,
        every other subset is a flat pair of int64 label columns."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        """Dispatch to the scorer matching self.config_name."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            # Re-shape into the structure the ReCoRD evaluation script expects.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
37
1
'''simple docstring''' from __future__ import annotations from math import pi, sqrt def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
37
# NOTE(review): obfuscated dump of a transformers ONNX-export test module.
# Documentation-only pass: code tokens are unchanged; several obfuscation
# breakages are flagged below but deliberately NOT fixed here.
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


# Stub whose forward-like method takes three contiguous positional arguments;
# used to exercise `ensure_valid_input` argument reordering below.
# NOTE(review): both stub classes got the same obfuscated name, so this first
# one is shadowed by the next definition — presumably FuncContiguousArgs
# originally; verify against the upstream test file.
class lowerCAmelCase_:
    '''simple docstring'''

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
        # NOTE(review): duplicate parameter names — SyntaxError as written;
        # original parameter names were lost in obfuscation.
        return None


# Stub with a fourth, non-contiguous argument (cf. GPT-2's "past" parameter).
class lowerCAmelCase_:
    '''simple docstring'''

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        return None


# Test suite for the graph-to-ONNX conversion helpers.
# NOTE(review): the methods below reference `OnnxExportTestCase.MODEL_TO_TEST`,
# `model`, `model_kwargs`, `quantized_path`, `path`, `tokens`, `shapes`,
# `variable_names`, `inputs_args`, `ordered_input_names`, `generated` — names
# that the obfuscation detached from their assignments; none resolve as written.
class lowerCAmelCase_( unittest.TestCase ):
    '''simple docstring'''

    # (model_name, model_kwargs) pairs driving the export tests.
    __lowercase : Dict = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def UpperCAmelCase_ ( self ) -> int:
        # Export every registered model through the TensorFlow path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Export every registered model through the PyTorch path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        # Export a BERT model built from a throwaway custom vocabulary, so the
        # tokenizer is not fetched from the hub.
        from transformers import BertModel

        lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__UpperCAmelCase ) )
            vocab_file.flush()
            lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) )
            model.save_pretrained(__UpperCAmelCase )
            self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase )

    @require_tf
    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        # TF export followed by dynamic quantization; the quantized artifact
        # must not be larger than the original ONNX file.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        # Same quantization size check, PyTorch export path.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase )
            lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]:
        # Shared export driver: run `convert` into a temp dir and hand back the
        # produced model path; any exception fails the calling test.
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
            return path
        except Exception as e:
            self.fail(__UpperCAmelCase )

    @require_torch
    @require_tokenizers
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Dynamic-axis inference on a tiny random BERT, PyTorch framework.
        from transformers import BertModel

        lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""pt""" )

    @require_tf
    @require_tokenizers
    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Dynamic-axis inference on the same tiny model, TensorFlow framework.
        from transformers import TFBertModel

        lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
        # Shared driver: `infer_shapes` must report batch/sequence dynamic axes
        # for the three inputs and the two outputs of a feature pipeline.
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase )
        lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase )
        # Assert all variables are present
        self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase )
        self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # `ensure_valid_input` must reorder/filter supplied tokens to match the
        # target callable's signature (contiguous vs non-contiguous arguments).
        lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__UpperCAmelCase ) ,3 )
        # Should have exactly the same input names
        self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__UpperCAmelCase ) ,1 )
        self.assertEqual(len(__UpperCAmelCase ) ,1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] ,"""input_ids""" )

    def UpperCAmelCase_ ( self ) -> Tuple:
        # `generate_identified_filename` appends the suffix before the extension.
        lowerCAmelCase__ : Dict = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
37
1
'''simple docstring'''
from math import factorial

# Factorial of each decimal digit, keyed by the digit as a one-character string.
# Fix: the body below referenced DIGIT_FACTORIAL, but the dump had bound the
# table only to the obfuscated name `_lowerCAmelCase` (NameError at call time).
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
_lowerCAmelCase = DIGIT_FACTORIAL  # backward-compat alias for the obfuscated name


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of *number*.

    :raises TypeError: if *number* is not an int
    :raises ValueError: if *number* is negative

    Fixes: the original checked ``isinstance(number, number)`` (always a
    TypeError on the wrong grounds) instead of ``isinstance(number, int)``, and
    the function was only bound to `_SCREAMING_SNAKE_CASE`, so the call inside
    ``solution`` could never resolve.
    """
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )


# Backward-compat alias for callers that used the obfuscated name.
_SCREAMING_SNAKE_CASE = digit_factorial_sum


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count starting numbers below *number_limit* whose digit-factorial chain
    has exactly *chain_length* non-repeating terms (Project Euler 74).

    Fixes: the obfuscated signature declared both defaults under one parameter
    name (SyntaxError); the body already used ``chain_length``/``number_limit``.
    The ``isinstance`` checks were repaired the same way as above, and the
    ``__main__`` guard's reference to ``solution`` now resolves.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating
        # item or the length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{solution()}""")
37
'''simple docstring''' from maths.prime_factors import prime_factors def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
37
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , ): """simple docstring""" if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
37
# NOTE(review): obfuscated dump of the GPT-SW3 SentencePiece tokenizer module.
# Documentation-only pass: code tokens are unchanged.  Flagged but NOT fixed:
# `_lowerCAmelCase` is rebound four times (only the last assignment survives),
# the class attributes reference `VOCAB_FILES_NAMES` / `PRETRAINED_*` names the
# dump never defines, the base class `SCREAMING_SNAKE_CASE_` is unresolved
# (presumably PreTrainedTokenizer), and several signatures repeat the same
# obfuscated parameter name, which is a SyntaxError as written.
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

# Module logger, vocab filename map, pretrained vocab URLs and positional
# embedding sizes — all bound to the same obfuscated name, so only the last
# (the sizes dict) is reachable at runtime.
_lowerCAmelCase = logging.get_logger(__name__)

_lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}

_lowerCAmelCase = {
    '''vocab_file''': {
        '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
        '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
    }
}

_lowerCAmelCase = {
    '''AI-Sweden/gpt-sw3-126m''': 2048,
    '''AI-Sweden/gpt-sw3-350m''': 2048,
    '''AI-Sweden/gpt-sw3-1.6b''': 2048,
    '''AI-Sweden/gpt-sw3-6.7b''': 2048,
    '''AI-Sweden/gpt-sw3-20b''': 2048,
}


# SentencePiece-backed tokenizer for the GPT-SW3 model family.
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''

    __lowercase : Dict = VOCAB_FILES_NAMES
    __lowercase : str = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : Optional[int] = ['''input_ids''', '''attention_mask''']

    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
        # NOTE(review): duplicate parameter names — SyntaxError as written; the
        # body reads `sp_model_kwargs`, `kwargs`, `name_or_path`, `eos_token`,
        # `unk_token`, `pad_token`, `bos_token`, `do_lower_case`, `remove_space`,
        # `keep_accents`, `vocab_file`, which were the original parameters.
        lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored""" )
            lowerCAmelCase__ : Tuple = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token
        lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint uses unk as padding and eos as bos.
            lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token
            lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token
        else:
            lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token
            lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
        lowerCAmelCase__ : Optional[int] = do_lower_case
        lowerCAmelCase__ : Dict = remove_space
        lowerCAmelCase__ : Optional[Any] = keep_accents
        lowerCAmelCase__ : int = vocab_file
        lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__UpperCAmelCase )
        # Used for whitespace normalization in input texts
        # fmt : off
        lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        lowerCAmelCase__ : List[str] = re.compile(
            F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" )

    def __getstate__( self ) -> Any:
        # Drop the (unpicklable) SentencePiece processor before pickling.
        lowerCAmelCase__ : int = self.__dict__.copy()
        lowerCAmelCase__ : Optional[int] = None
        return state

    def __setstate__( self ,__UpperCAmelCase ) -> List[str]:
        # Restore state and reload the SentencePiece model from disk.
        lowerCAmelCase__ : List[str] = d
        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            lowerCAmelCase__ : Tuple = {}
        lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def UpperCAmelCase_ ( self ) -> int:
        # Vocabulary size is the SentencePiece model's piece count.
        return len(self.sp_model )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # Strip non-printing characters, collapse exotic whitespace to a plain
        # space and apply NFC normalization.
        lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase )
        # Normalize whitespaces
        lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase )
        return text

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]:
        # Tokenize: preprocess, then let SentencePiece split into string pieces.
        lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase )
        return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
        # Token string -> vocabulary id.
        return self.sp_model.PieceToId(__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # Vocabulary id -> token string.
        return self.sp_model.IdToPiece(__UpperCAmelCase )

    @staticmethod
    def UpperCAmelCase_ ( __UpperCAmelCase ) -> str:
        # Identity passthrough (the original clean-up hook returns its input).
        return out_string

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # Join token pieces back into text, decoding special tokens verbatim
        # rather than through the SentencePiece model.
        lowerCAmelCase__ : int = []
        lowerCAmelCase__ : Optional[int] = """"""
        lowerCAmelCase__ : Tuple = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__UpperCAmelCase ) + token
                lowerCAmelCase__ : Union[str, Any] = True
                lowerCAmelCase__ : Optional[Any] = []
            else:
                current_sub_tokens.append(__UpperCAmelCase )
                lowerCAmelCase__ : Any = False
        out_string += self.sp_model.decode(__UpperCAmelCase )
        return out_string

    def UpperCAmelCase_ ( self ) -> Dict[str, int]:
        # Full token -> id mapping, including user-added tokens.
        lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
        # Persist the SentencePiece model file into `save_directory`, copying
        # the original file when it exists or serializing the in-memory proto.
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ : Optional[int] = os.path.join(
            __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,__UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCAmelCase ,"""wb""" ) as fi:
                lowerCAmelCase__ : str = self.sp_model.serialized_model_proto()
                fi.write(__UpperCAmelCase )
        return (out_vocab_file,)

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        # Fast-path encode for a string or a batch of strings; optionally
        # returns a torch tensor when requested.
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
            lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase )
            lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase )
        else:
            lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text]
            lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase )
        if return_tensors is True or return_tensors == "pt":
            lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase )
        return token_ids

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str:
        # Fast-path decode straight through the SentencePiece model.
        return self.sp_model.decode(__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]:
        # Build a chat prompt from a Conversation object: alternate
        # "User:"/"Bot:" turns joined by bos, prefixed by eos+bos, and encode it.
        lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        lowerCAmelCase__ : Any = (
            F"""{self.eos_token}{self.bos_token}"""
            + F"""{self.bos_token}""".join(__UpperCAmelCase )
            + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=__UpperCAmelCase )
37
1
'''simple docstring'''
# NOTE(review): this file is a machine-obfuscated copy of the MRA model test
# suite.  The obfuscation broke the code: every parameter is named
# __UpperCAmelCase (duplicated parameter names are a SyntaxError), every local
# is assigned to lowerCAmelCase__ while later statements still read the
# original names (config, input_ids, result, ...), the typing names used in
# annotations (Dict, Tuple, ...) are never imported, and the names
# SCREAMING_SNAKE_CASE_ and MraModelTester are undefined.  The code is
# reproduced verbatim below (reformatted only), with comments describing the
# apparent intent of each piece.
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask

if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class lowerCAmelCase_:
    '''simple docstring'''

    # Model-tester helper: builds a tiny MraConfig plus random input tensors
    # and runs output-shape checks for every MRA task head.
    # NOTE(review): presumably the original MraModelTester — confirm against
    # the upstream transformers test file.

    def __init__(self, __UpperCAmelCase, __UpperCAmelCase=2, __UpperCAmelCase=8, __UpperCAmelCase=True, __UpperCAmelCase=True, __UpperCAmelCase=True, __UpperCAmelCase=True, __UpperCAmelCase=99, __UpperCAmelCase=16, __UpperCAmelCase=5, __UpperCAmelCase=2, __UpperCAmelCase=36, __UpperCAmelCase="gelu", __UpperCAmelCase=0.0, __UpperCAmelCase=0.0, __UpperCAmelCase=512, __UpperCAmelCase=16, __UpperCAmelCase=2, __UpperCAmelCase=0.0_2, __UpperCAmelCase=3, __UpperCAmelCase=4, __UpperCAmelCase=None,) -> Dict:
        # Stores the test hyper-parameters (batch size, sequence length, model
        # dims, ...).  NOTE(review): the RHS names below (parent, batch_size,
        # ...) are the original parameter names and are undefined here; the
        # assignments should read self.parent = parent, etc.
        lowerCAmelCase__ : Dict = parent
        lowerCAmelCase__ : Optional[int] = batch_size
        lowerCAmelCase__ : Optional[int] = seq_length
        lowerCAmelCase__ : Any = is_training
        lowerCAmelCase__ : str = use_input_mask
        lowerCAmelCase__ : Any = use_token_type_ids
        lowerCAmelCase__ : Union[str, Any] = use_labels
        lowerCAmelCase__ : List[Any] = vocab_size
        lowerCAmelCase__ : str = hidden_size
        lowerCAmelCase__ : Tuple = num_hidden_layers
        lowerCAmelCase__ : Any = num_attention_heads
        lowerCAmelCase__ : Union[str, Any] = intermediate_size
        lowerCAmelCase__ : List[Any] = hidden_act
        lowerCAmelCase__ : Tuple = hidden_dropout_prob
        lowerCAmelCase__ : int = attention_probs_dropout_prob
        lowerCAmelCase__ : Dict = max_position_embeddings
        lowerCAmelCase__ : Optional[int] = type_vocab_size
        lowerCAmelCase__ : Optional[int] = type_sequence_label_size
        lowerCAmelCase__ : int = initializer_range
        lowerCAmelCase__ : Dict = num_labels
        lowerCAmelCase__ : List[str] = num_choices
        lowerCAmelCase__ : str = scope

    def UpperCAmelCase_(self) -> int:
        # Builds random input_ids, optional attention mask / token type ids and
        # optional labels, plus a config, for one forward pass.
        lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lowerCAmelCase__ : Union[str, Any] = None
        if self.use_input_mask:
            lowerCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
        lowerCAmelCase__ : Tuple = None
        if self.use_token_type_ids:
            lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        lowerCAmelCase__ : int = None
        lowerCAmelCase__ : str = None
        lowerCAmelCase__ : int = None
        if self.use_labels:
            # sequence-level, token-level and multiple-choice labels
            lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
            lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            lowerCAmelCase__ : Any = ids_tensor([self.batch_size], self.num_choices)
        lowerCAmelCase__ : Any = self.get_config()
        # NOTE(review): the returned names are never assigned above (see file
        # header) — the locals all went to lowerCAmelCase__.
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase_(self) -> int:
        # Returns a small MraConfig built from the tester's hyper-parameters.
        # NOTE(review): is_decoder=__UpperCAmelCase reads an undefined name
        # here (originally is_decoder=False).
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=__UpperCAmelCase,
            initializer_range=self.initializer_range,
        )

    def UpperCAmelCase_(self) -> Optional[int]:
        # Pipeline variant of the config: bumps the vocab size to 300.
        lowerCAmelCase__ : Dict = self.get_config()
        lowerCAmelCase__ : Union[str, Any] = 300
        return config

    def UpperCAmelCase_(self) -> Optional[int]:
        # Decoder variant: same inputs plus encoder hidden states / mask for
        # cross-attention.
        # NOTE(review): unpacking seven values into the same repeated name and
        # annotating a tuple target (": int") is invalid Python.
        (
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
        ) : int = self.prepare_config_and_inputs()
        lowerCAmelCase__ : int = True
        lowerCAmelCase__ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> Optional[int]:
        # Checks the base MraModel forward pass with/without mask and token
        # type ids; asserts the last hidden state shape.
        lowerCAmelCase__ : Optional[int] = MraModel(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase)
        lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase, token_type_ids=__UpperCAmelCase)
        lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase,) -> int:
        # Checks MraModel used as a decoder with encoder hidden states and an
        # optional encoder attention mask.
        lowerCAmelCase__ : List[str] = True
        lowerCAmelCase__ : Union[str, Any] = MraModel(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : Optional[Any] = model(
            __UpperCAmelCase,
            attention_mask=__UpperCAmelCase,
            token_type_ids=__UpperCAmelCase,
            encoder_hidden_states=__UpperCAmelCase,
            encoder_attention_mask=__UpperCAmelCase,
        )
        lowerCAmelCase__ : str = model(
            __UpperCAmelCase,
            attention_mask=__UpperCAmelCase,
            token_type_ids=__UpperCAmelCase,
            encoder_hidden_states=__UpperCAmelCase,
        )
        lowerCAmelCase__ : Any = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> int:
        # Masked-LM head: logits over the vocabulary at every position.
        lowerCAmelCase__ : Tuple = MraForMaskedLM(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : Any = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> List[str]:
        # Question-answering head: per-position start and end logits.
        lowerCAmelCase__ : List[Any] = MraForQuestionAnswering(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : int = model(
            __UpperCAmelCase,
            attention_mask=__UpperCAmelCase,
            token_type_ids=__UpperCAmelCase,
            start_positions=__UpperCAmelCase,
            end_positions=__UpperCAmelCase,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> str:
        # Sequence-classification head: one logit vector per example.
        lowerCAmelCase__ : Optional[Any] = self.num_labels
        lowerCAmelCase__ : Union[str, Any] = MraForSequenceClassification(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : int = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> Optional[int]:
        # Token-classification head: one logit vector per token.
        lowerCAmelCase__ : List[Any] = self.num_labels
        lowerCAmelCase__ : Optional[Any] = MraForTokenClassification(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : Any = model(__UpperCAmelCase, attention_mask=__UpperCAmelCase, token_type_ids=__UpperCAmelCase, labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> Optional[Any]:
        # Multiple-choice head: inputs are tiled to
        # (batch, num_choices, seq_len) before the forward pass.
        lowerCAmelCase__ : Any = self.num_choices
        lowerCAmelCase__ : Optional[Any] = MraForMultipleChoice(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        lowerCAmelCase__ : int = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        lowerCAmelCase__ : List[Any] = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        lowerCAmelCase__ : Tuple = model(
            __UpperCAmelCase,
            attention_mask=__UpperCAmelCase,
            token_type_ids=__UpperCAmelCase,
            labels=__UpperCAmelCase,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def UpperCAmelCase_(self) -> List[str]:
        # Adapter used by the common ModelTesterMixin machinery: returns the
        # config and a dict of forward-pass keyword arguments.
        lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
        (
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
            (lowerCAmelCase__),
        ) : Optional[Any] = config_and_inputs
        lowerCAmelCase__ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class lowerCAmelCase_(SCREAMING_SNAKE_CASE_, unittest.TestCase):
    '''simple docstring'''

    # Common model test suite driven by the tester above.
    # NOTE(review): SCREAMING_SNAKE_CASE_ is undefined — presumably
    # ModelTesterMixin.  All methods share the name UpperCAmelCase_, so each
    # definition overwrites the previous one and none is discovered by
    # unittest (test methods must start with "test").

    # NOTE(review): these placeholders presumably correspond to
    # all_model_classes and the usual test_* feature flags.
    __lowercase : Dict = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    __lowercase : str = False
    __lowercase : Union[str, Any] = False
    __lowercase : Optional[Any] = False
    __lowercase : int = False
    __lowercase : int = ()

    def UpperCAmelCase_(self) -> Tuple:
        # setUp: instantiate the model tester and the config tester.
        # NOTE(review): MraModelTester and __UpperCAmelCase are undefined here.
        lowerCAmelCase__ : List[str] = MraModelTester(self)
        lowerCAmelCase__ : Dict = ConfigTester(self, config_class=__UpperCAmelCase, hidden_size=37)

    def UpperCAmelCase_(self) -> Tuple:
        # Runs the shared MraConfig sanity checks.
        self.config_tester.run_common_tests()

    def UpperCAmelCase_(self) -> List[Any]:
        # Base model forward-pass shape test.
        lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase)

    def UpperCAmelCase_(self) -> Union[str, Any]:
        # Re-runs the base model test for each position-embedding flavour.
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCAmelCase__ : str = type
            self.model_tester.create_and_check_model(*__UpperCAmelCase)

    def UpperCAmelCase_(self) -> List[Any]:
        # Masked-LM head shape test.
        lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase)

    def UpperCAmelCase_(self) -> str:
        # Multiple-choice head shape test.
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase)

    def UpperCAmelCase_(self) -> str:
        # Question-answering head shape test.
        lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)

    def UpperCAmelCase_(self) -> Dict:
        # Sequence-classification head shape test.
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase)

    def UpperCAmelCase_(self) -> Dict:
        # Token-classification head shape test.
        lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)

    @slow
    def UpperCAmelCase_(self) -> Optional[Any]:
        # Smoke test: the first published MRA checkpoint loads from the hub.
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : Any = MraModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)

    @unittest.skip(reason="""MRA does not output attentions""")
    def UpperCAmelCase_(self) -> Union[str, Any]:
        return


@require_torch
class lowerCAmelCase_(unittest.TestCase):
    '''simple docstring'''

    # Slow integration tests: compare model outputs against reference values
    # recorded from the published uw-madison MRA checkpoints.
    # NOTE(review): as above, the locals go to lowerCAmelCase__ while the
    # assertions read model/output/__UpperCAmelCase, and the shared method
    # name prevents unittest discovery.

    @slow
    def UpperCAmelCase_(self) -> Dict:
        # Base model on a 512-length checkpoint: check output shape and a
        # 3x3 slice of the hidden states.
        lowerCAmelCase__ : str = MraModel.from_pretrained("""uw-madison/mra-base-512-4""")
        lowerCAmelCase__ : List[Any] = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            lowerCAmelCase__ : int = model(__UpperCAmelCase)[0]
        lowerCAmelCase__ : Optional[int] = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, __UpperCAmelCase)
        lowerCAmelCase__ : str = torch.tensor(
            [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], __UpperCAmelCase, atol=1E-4))

    @slow
    def UpperCAmelCase_(self) -> Tuple:
        # Masked-LM head on the 512-length checkpoint: vocabulary-sized logits
        # plus a reference slice.
        lowerCAmelCase__ : Any = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""")
        lowerCAmelCase__ : Tuple = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase)[0]
        lowerCAmelCase__ : List[str] = 5_0265
        lowerCAmelCase__ : int = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, __UpperCAmelCase)
        lowerCAmelCase__ : Optional[int] = torch.tensor(
            [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], __UpperCAmelCase, atol=1E-4))

    @slow
    def UpperCAmelCase_(self) -> Tuple:
        # Masked-LM head on the long-context (4096) checkpoint.
        lowerCAmelCase__ : Union[str, Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""")
        lowerCAmelCase__ : Optional[int] = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase)[0]
        lowerCAmelCase__ : Optional[Any] = 5_0265
        lowerCAmelCase__ : Tuple = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, __UpperCAmelCase)
        lowerCAmelCase__ : Optional[int] = torch.tensor(
            [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], __UpperCAmelCase, atol=1E-4))
37
'''simple docstring'''
# Slow integration tests for XLM-RoBERTa: run the base and large checkpoints
# on a fixed sentence and compare a slice of the last hidden state against
# reference values extracted from the original fairseq implementation.
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Reference-value checks for the pretrained XLM-RoBERTa checkpoints."""

    # Fix: the two methods previously shared one obfuscated name (the second
    # silently shadowed the first, and neither started with "test" so unittest
    # never ran them), and the method bodies read an undefined placeholder
    # name where the locals had been renamed away.

    @slow
    def test_xlm_roberta_base(self) -> None:
        """xlm-roberta-base: check output shape and last-dim slice values."""
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]]
        )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self) -> None:
        """xlm-roberta-large: check output shape and last-dim slice values."""
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]]
        )
        # Reference values were produced with:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["""last_hidden_state"""].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
37
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if any(not isinstance(UpperCamelCase , UpperCamelCase ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(UpperCamelCase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(UpperCamelCase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
37
'''simple docstring'''
# NOTE(review): this file is a machine-obfuscated copy of the Flax->PyTorch
# checkpoint-loading utilities.  The obfuscation broke it: both functions are
# named _SCREAMING_SNAKE_CASE (the second shadows the first), both declare two
# parameters with the same name UpperCamelCase (a SyntaxError), every local is
# assigned to lowerCAmelCase__ while later statements read the original names
# (flax_state_dict, pt_model_dict, flax_key, missing_keys, ...), the module
# logger is bound to _lowerCAmelCase but used as `logger`, and the first
# function's error message references the undefined `model_file`.  The code is
# reproduced verbatim (reformatted only) with comments describing the intent.
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging

_lowerCAmelCase = logging.get_logger(__name__)


def _SCREAMING_SNAKE_CASE(UpperCamelCase, UpperCamelCase):
    """Load a serialized Flax checkpoint file and copy its weights into a PyTorch model.

    Presumably (model, checkpoint_path) -> model with weights loaded — TODO
    confirm against the original transformers implementation.
    """
    try:
        # msgpack-deserialize the Flax state dict from the checkpoint file
        with open(UpperCamelCase, """rb""") as flax_state_f:
            lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase, flax_state_f.read())
    except UnpicklingError as e:
        try:
            # If the file is a git-lfs pointer (starts with "version"), give a
            # targeted hint instead of a generic deserialization error.
            with open(UpperCamelCase) as f:
                if f.read().startswith("""version"""):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned."""
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """)
    return load_flax_weights_in_pytorch_model(UpperCamelCase, UpperCamelCase)


def _SCREAMING_SNAKE_CASE(UpperCamelCase, UpperCamelCase):
    """Copy a (flattened) Flax parameter tree into a PyTorch model's state dict.

    Presumably (pt_model, flax_state) -> pt_model — TODO confirm.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions."""
        )
        raise
    # check if we have bf16 weights
    lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase: x.dtype == jnp.bfloataa, UpperCamelCase)).values()
    if any(UpperCamelCase):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model."""
        )
        lowerCAmelCase__ : Dict = jax.tree_util.tree_map(
            lambda UpperCamelCase: params.astype(np.floataa) if params.dtype == jnp.bfloataa else params, UpperCamelCase
        )
    lowerCAmelCase__ : Any = """"""
    # Flatten the nested Flax tree to dot-separated keys to mirror the PyTorch
    # state-dict naming scheme.
    lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase, sep=""".""")
    lowerCAmelCase__ : Optional[int] = pt_model.state_dict()
    # keep track of unexpected & missing keys
    lowerCAmelCase__ : Optional[Any] = []
    lowerCAmelCase__ : int = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # Conv kernels: Flax stores HWIO, PyTorch expects OIHW.
            lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
            lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # Dense kernels: transpose to match PyTorch's (out, in) layout.
            lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""]
            lowerCAmelCase__ : Any = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            # LayerNorm "scale" maps to PyTorch "weight".
            lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""]
        if "time_embedding" not in flax_key_tuple_array:
            # Rename numbered Flax submodules (layer_0) to the PyTorch
            # ModuleList style (layer.0).
            for i, flax_key_tuple_string in enumerate(UpperCamelCase):
                lowerCAmelCase__ : List[str] = (
                    flax_key_tuple_string.replace("""_0""", """.0""")
                    .replace("""_1""", """.1""")
                    .replace("""_2""", """.2""")
                    .replace("""_3""", """.3""")
                    .replace("""_4""", """.4""")
                    .replace("""_5""", """.5""")
                    .replace("""_6""", """.6""")
                    .replace("""_7""", """.7""")
                    .replace("""_8""", """.8""")
                    .replace("""_9""", """.9""")
                )
        lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )
            else:
                # add weight to pytorch dict
                lowerCAmelCase__ : int = np.asarray(UpperCamelCase) if not isinstance(UpperCamelCase, np.ndarray) else flax_tensor
                lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase)
                # remove from missing keys
                missing_keys.remove(UpperCamelCase)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(UpperCamelCase)
    pt_model.load_state_dict(UpperCamelCase)
    # re-transform missing_keys to list
    lowerCAmelCase__ : Optional[int] = list(UpperCamelCase)
    if len(UpperCamelCase) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model)."""
        )
    if len(UpperCamelCase) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            """ use it for predictions and inference."""
        )
    return pt_model
37
1
'''simple docstring''' from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _lowerCAmelCase = HfArgumentParser(InitializationArguments) _lowerCAmelCase = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _lowerCAmelCase = { '''vocab_size''': len(tokenizer), '''scale_attn_by_inverse_layer_idx''': True, '''reorder_and_upcast_attn''': True, } # Load model config (GPT-2 large in this case) _lowerCAmelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _lowerCAmelCase = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
37
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str: lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Dict = use_auxiliary_loss lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : List[str] = min_size lowerCAmelCase__ : int = max_size lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = mask_feature_size def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase ) lowerCAmelCase__ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5 ).float() lowerCAmelCase__ : Optional[int] = 
(torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long() lowerCAmelCase__ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]: with torch.no_grad(): lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of 
# NOTE(review): identifiers in this whole chunk were machine-mangled — every local
# is rebound to `lowerCAmelCase__` and every argument to `__UpperCAmelCase` — so
# later reads of names like `model`, `outputs`, `inputs_shape`, `arg_names` are
# unresolved as written. Restore the real names from upstream history; only
# comments are added here, tokens are unchanged.
# (continued from previous chunk: asserting hidden states coming from the
#  encoder and pixel decoder)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(__UpperCAmelCase, __UpperCAmelCase)

    # Builds a MaskFormerForInstanceSegmentation and checks outputs with and
    # without labels (loss).  NOTE(review): duplicate parameter names below are
    # obfuscation damage — a real signature needs distinct names.
    def UpperCAmelCase_(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> Optional[int]:
        lowerCAmelCase__: Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()

        def comm_check_on_output(__UpperCAmelCase):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            lowerCAmelCase__: List[Any] = model(pixel_values=__UpperCAmelCase, pixel_mask=__UpperCAmelCase)
            lowerCAmelCase__: Dict = model(__UpperCAmelCase)

        comm_check_on_output(__UpperCAmelCase)

        lowerCAmelCase__: Optional[int] = model(
            pixel_values=__UpperCAmelCase, pixel_mask=__UpperCAmelCase, mask_labels=__UpperCAmelCase, class_labels=__UpperCAmelCase
        )

        comm_check_on_output(__UpperCAmelCase)

        # with labels the model must also return a scalar loss
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class lowerCAmelCase_(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase):
    """Common-test harness for MaskFormer (ModelTesterMixin + PipelineTesterMixin, presumably)."""

    __lowercase: Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __lowercase: int = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    __lowercase: Union[str, Any] = False
    __lowercase: Dict = False
    __lowercase: Tuple = False
    __lowercase: List[Any] = False

    def UpperCAmelCase_(self) -> Optional[int]:
        # setUp: build the model tester and the config tester
        lowerCAmelCase__: str = MaskFormerModelTester(self)
        lowerCAmelCase__: List[Any] = ConfigTester(self, config_class=__UpperCAmelCase, has_text_modality=__UpperCAmelCase)

    def UpperCAmelCase_(self) -> List[str]:
        self.config_tester.run_common_tests()

    def UpperCAmelCase_(self) -> Union[str, Any]:
        lowerCAmelCase__, lowerCAmelCase__: str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase, **__UpperCAmelCase, output_hidden_states=__UpperCAmelCase)

    def UpperCAmelCase_(self) -> Optional[int]:
        lowerCAmelCase__: List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase)

    @unittest.skip(reason="""MaskFormer does not use inputs_embeds""")
    def UpperCAmelCase_(self) -> List[Any]:
        pass

    @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""")
    def UpperCAmelCase_(self) -> str:
        pass

    @unittest.skip(reason="""MaskFormer is not a generative model""")
    def UpperCAmelCase_(self) -> Any:
        pass

    @unittest.skip(reason="""MaskFormer does not use token embeddings""")
    def UpperCAmelCase_(self) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"""
    )
    def UpperCAmelCase_(self) -> Union[str, Any]:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def UpperCAmelCase_(self) -> List[str]:
        pass

    def UpperCAmelCase_(self) -> Tuple:
        # checks that the first forward argument of every model class is `pixel_values`
        lowerCAmelCase__, lowerCAmelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__: str = model_class(__UpperCAmelCase)
            lowerCAmelCase__: Dict = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__: Dict = [*signature.parameters.keys()]

            lowerCAmelCase__: Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], __UpperCAmelCase)

    @slow
    def UpperCAmelCase_(self) -> Union[str, Any]:
        # smoke-test loading a published checkpoint from the hub
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase__: List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)

    def UpperCAmelCase_(self) -> str:
        # forward with target (mask/class) labels must produce a loss
        lowerCAmelCase__: List[Any] = (self.model_tester.min_size,) * 2
        lowerCAmelCase__: Any = {
            """pixel_values""": torch.randn((2, 3, *size), device=__UpperCAmelCase),
            """mask_labels""": torch.randn((2, 10, *size), device=__UpperCAmelCase),
            """class_labels""": torch.zeros(2, 10, device=__UpperCAmelCase).long(),
        }

        lowerCAmelCase__: Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(__UpperCAmelCase)
        lowerCAmelCase__: Union[str, Any] = model(**__UpperCAmelCase)
        self.assertTrue(outputs.loss is not None)

    def UpperCAmelCase_(self) -> str:
        lowerCAmelCase__, lowerCAmelCase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase, **__UpperCAmelCase, output_hidden_states=__UpperCAmelCase)

    def UpperCAmelCase_(self) -> Tuple:
        # attentions must be returned when output_attentions is requested
        lowerCAmelCase__, lowerCAmelCase__: Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase__: int = model_class(__UpperCAmelCase).to(__UpperCAmelCase)
            lowerCAmelCase__: List[Any] = model(**__UpperCAmelCase, output_attentions=__UpperCAmelCase)
            self.assertTrue(outputs.attentions is not None)

    def UpperCAmelCase_(self) -> int:
        # training smoke test: loss.backward() must run
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase__: Dict = self.all_model_classes[1]
        lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs()

        lowerCAmelCase__: List[Any] = model_class(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.train()

        lowerCAmelCase__: List[str] = model(__UpperCAmelCase, mask_labels=__UpperCAmelCase, class_labels=__UpperCAmelCase).loss
        loss.backward()

    def UpperCAmelCase_(self) -> List[str]:
        # retain-grad test: gradients must flow to every intermediate output
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase__: Tuple = self.all_model_classes[1]
        lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase__: Union[str, Any] = True
        lowerCAmelCase__: Tuple = True

        lowerCAmelCase__: Optional[Any] = model_class(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.train()

        lowerCAmelCase__: Dict = model(__UpperCAmelCase, mask_labels=__UpperCAmelCase, class_labels=__UpperCAmelCase)

        lowerCAmelCase__: Optional[Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        lowerCAmelCase__: str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase__: Union[str, Any] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        lowerCAmelCase__: List[Any] = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=__UpperCAmelCase)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


# absolute tolerance used by the integration tests below
_lowerCAmelCase = 1e-4


def _SCREAMING_SNAKE_CASE():
    """Load the standard COCO cats test fixture image (prepare_img)."""
    lowerCAmelCase__: Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


@require_vision
@slow
class lowerCAmelCase_(unittest.TestCase):
    """Slow integration tests against published MaskFormer checkpoints (exact logits)."""

    @cached_property
    def UpperCAmelCase_(self) -> List[Any]:
        # image processor fixture; None when vision deps are unavailable
        return (
            MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""")
            if is_vision_available()
            else None
        )

    def UpperCAmelCase_(self) -> Any:
        # backbone-only forward: compare 3x3 corner slices of the three hidden states
        lowerCAmelCase__: Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""").to(__UpperCAmelCase)
        lowerCAmelCase__: str = self.default_image_processor
        lowerCAmelCase__: str = prepare_img()
        lowerCAmelCase__: Optional[int] = image_processor(__UpperCAmelCase, return_tensors="""pt""").to(__UpperCAmelCase)
        lowerCAmelCase__: Dict = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(__UpperCAmelCase, (1, 3, 800, 1088))

        with torch.no_grad():
            lowerCAmelCase__: Union[str, Any] = model(**__UpperCAmelCase)

        lowerCAmelCase__: Optional[Any] = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]
        ).to(__UpperCAmelCase)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase)
        )

        lowerCAmelCase__: Dict = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]
        ).to(__UpperCAmelCase)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase)
        )

        lowerCAmelCase__: Optional[int] = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]
        ).to(__UpperCAmelCase)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase)
        )

    def UpperCAmelCase_(self) -> Optional[Any]:
        # instance-segmentation head inference on the Swin-small COCO checkpoint
        lowerCAmelCase__: List[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""")
            .to(__UpperCAmelCase)
            .eval()
        )
        lowerCAmelCase__: Optional[Any] = self.default_image_processor
        lowerCAmelCase__: List[str] = prepare_img()
        lowerCAmelCase__: str = image_processor(__UpperCAmelCase, return_tensors="""pt""").to(__UpperCAmelCase)
        lowerCAmelCase__: Optional[int] = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(__UpperCAmelCase, (1, 3, 800, 1088))

        with torch.no_grad():
            lowerCAmelCase__: List[Any] = model(**__UpperCAmelCase)

        # masks_queries_logits
        lowerCAmelCase__: Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        lowerCAmelCase__: Optional[int] = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        lowerCAmelCase__: Optional[int] = torch.tensor(__UpperCAmelCase).to(__UpperCAmelCase)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase))

        # class_queries_logits
        lowerCAmelCase__: Tuple = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        lowerCAmelCase__: Union[str, Any] = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ]
        ).to(__UpperCAmelCase)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase))

    def UpperCAmelCase_(self) -> str:
        # same inference test against the ResNet-101 COCO-stuff checkpoint
        lowerCAmelCase__: List[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""")
            .to(__UpperCAmelCase)
            .eval()
        )
        lowerCAmelCase__: Optional[Any] = self.default_image_processor
        lowerCAmelCase__: int = prepare_img()
        lowerCAmelCase__: Optional[Any] = image_processor(__UpperCAmelCase, return_tensors="""pt""").to(__UpperCAmelCase)
        lowerCAmelCase__: str = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(__UpperCAmelCase, (1, 3, 800, 1088))

        with torch.no_grad():
            lowerCAmelCase__: str = model(**__UpperCAmelCase)

        # masks_queries_logits
        lowerCAmelCase__: Optional[Any] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        lowerCAmelCase__: int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        lowerCAmelCase__: List[str] = torch.tensor(__UpperCAmelCase).to(__UpperCAmelCase)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase))

        # class_queries_logits
        lowerCAmelCase__: Optional[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        lowerCAmelCase__: Tuple = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]
        ).to(__UpperCAmelCase)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __UpperCAmelCase, atol=__UpperCAmelCase))

    def UpperCAmelCase_(self) -> Optional[Any]:
        # end-to-end with segmentation maps supplied to the processor: loss path only
        lowerCAmelCase__: str = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""")
            .to(__UpperCAmelCase)
            .eval()
        )
        lowerCAmelCase__: Dict = self.default_image_processor

        lowerCAmelCase__: Union[str, Any] = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)],
            return_tensors="""pt""",
        )

        lowerCAmelCase__: Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase)
        lowerCAmelCase__: Union[str, Any] = [el.to(__UpperCAmelCase) for el in inputs["""mask_labels"""]]
        lowerCAmelCase__: Union[str, Any] = [el.to(__UpperCAmelCase) for el in inputs["""class_labels"""]]

        with torch.no_grad():
            lowerCAmelCase__: Any = model(**__UpperCAmelCase)

        self.assertTrue(outputs.loss is not None)
37
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : Tuple = set(UpperCamelCase ), [start] while stack: lowerCAmelCase__ : Any = stack.pop() explored.add(UpperCamelCase ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(UpperCamelCase ) return explored _lowerCAmelCase = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
37
"""FocalNet model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# checkpoint name -> config URL map (kept under its previous module-level name)
_lowerCAmelCase = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


# NOTE(review): the base classes were flattened to `SCREAMING_SNAKE_CASE_` twice;
# the only two classes imported above are BackboneConfigMixin and
# PretrainedConfig, so those are restored here.
class lowerCAmelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration class holding the hyperparameters of a FocalNet model.

    Bug fixes vs. the previous revision:
      * every ``__init__`` parameter shared the name ``__UpperCAmelCase``
        (a SyntaxError) — real names restored from the assignment list;
      * the hyperparameters were bound to throwaway locals instead of
        ``self.<attr>``, so the config stored nothing and the later read of
        ``self.depths`` raised AttributeError;
      * the class-level ``Optional[Any]`` annotation referenced an unimported
        name and is dropped.
    """

    # presumably the mangled `model_type = "focalnet"`; name kept for compatibility
    __lowercase = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],  # noqa: B006 — defaults kept as-is for compatibility
        depths=[2, 2, 6, 2],  # noqa: B006
        focal_levels=[2, 2, 2, 2],  # noqa: B006
        focal_windows=[3, 3, 3, 3],  # noqa: B006
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # one "stem" stage plus one named stage per depth entry
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # standard BackboneConfigMixin bookkeeping for backbone feature selection
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
37
1
"""Speech-to-image community pipeline: Whisper transcribes audio to a prompt,
then a Stable-Diffusion loop turns the prompt into images.

NOTE(review): identifiers were machine-mangled — every local is rebound to
`lowerCAmelCase__` and every parameter to `__UpperCAmelCase` (duplicate
parameter names are a SyntaxError) — so later reads of names like
`batch_size`, `text_input_ids`, `latents`, `noise_pred` are unresolved as
written.  Restore the real names from upstream history; only comments are
added here, tokens are unchanged.
"""
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


_lowerCAmelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name


class lowerCAmelCase_(SCREAMING_SNAKE_CASE_):
    """DiffusionPipeline subclass (base mangled to SCREAMING_SNAKE_CASE_)."""

    def __init__(self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase) -> str:
        # registers speech model/processor plus the usual SD components
        super().__init__()

        if safety_checker is None:
            # warn loudly when the safety checker is disabled — see diffusers#254
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."""
            )

        self.register_modules(
            speech_model=__UpperCAmelCase,
            speech_processor=__UpperCAmelCase,
            vae=__UpperCAmelCase,
            text_encoder=__UpperCAmelCase,
            tokenizer=__UpperCAmelCase,
            unet=__UpperCAmelCase,
            scheduler=__UpperCAmelCase,
            feature_extractor=__UpperCAmelCase,
        )

    def UpperCAmelCase_(self, __UpperCAmelCase="auto") -> str:
        # enable attention slicing; "auto" halves the head dim as slice size
        if slice_size == "auto":
            lowerCAmelCase__: int = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__UpperCAmelCase)

    def UpperCAmelCase_(self) -> Optional[int]:
        # disable attention slicing (re-enables full attention computation)
        self.enable_attention_slicing(__UpperCAmelCase)

    @torch.no_grad()
    def __call__(
        self,
        __UpperCAmelCase,
        __UpperCAmelCase=1_6000,
        __UpperCAmelCase=512,
        __UpperCAmelCase=512,
        __UpperCAmelCase=50,
        __UpperCAmelCase=7.5,
        __UpperCAmelCase=None,
        __UpperCAmelCase=1,
        __UpperCAmelCase=0.0,
        __UpperCAmelCase=None,
        __UpperCAmelCase=None,
        __UpperCAmelCase="pil",
        __UpperCAmelCase=True,
        __UpperCAmelCase=None,
        __UpperCAmelCase=1,
        **__UpperCAmelCase,
    ) -> List[str]:
        # 1) transcribe the audio input with Whisper to obtain the text prompt
        lowerCAmelCase__: Optional[int] = self.speech_processor.feature_extractor(
            __UpperCAmelCase, return_tensors="""pt""", sampling_rate=__UpperCAmelCase
        ).input_features.to(self.device)
        lowerCAmelCase__: Any = self.speech_model.generate(__UpperCAmelCase, max_length=48_0000)

        lowerCAmelCase__: Union[str, Any] = self.speech_processor.tokenizer.batch_decode(
            __UpperCAmelCase, skip_special_tokens=__UpperCAmelCase, normalize=__UpperCAmelCase
        )[0]

        # 2) validate prompt type / image size / callback_steps
        if isinstance(__UpperCAmelCase, __UpperCAmelCase):
            lowerCAmelCase__: Any = 1
        elif isinstance(__UpperCAmelCase, __UpperCAmelCase):
            lowerCAmelCase__: Tuple = len(__UpperCAmelCase)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase)}""")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCAmelCase, __UpperCAmelCase) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(__UpperCAmelCase)}."""
            )

        # get prompt text embeddings
        lowerCAmelCase__: Union[str, Any] = self.tokenizer(
            __UpperCAmelCase,
            padding="""max_length""",
            max_length=self.tokenizer.model_max_length,
            return_tensors="""pt""",
        )
        lowerCAmelCase__: List[str] = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # CLIP hard-truncates; warn about the dropped tail
            lowerCAmelCase__: Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}"""
            )
            lowerCAmelCase__: Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
        lowerCAmelCase__: Optional[int] = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__: Optional[int] = text_embeddings.shape
        lowerCAmelCase__: Tuple = text_embeddings.repeat(1, __UpperCAmelCase, 1)
        lowerCAmelCase__: List[str] = text_embeddings.view(bs_embed * num_images_per_prompt, __UpperCAmelCase, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowerCAmelCase__: Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowerCAmelCase__: List[str]
            if negative_prompt is None:
                lowerCAmelCase__: Any = [""""""] * batch_size
            elif type(__UpperCAmelCase) is not type(__UpperCAmelCase):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase)} !="""
                    f""" {type(__UpperCAmelCase)}."""
                )
            elif isinstance(__UpperCAmelCase, __UpperCAmelCase):
                lowerCAmelCase__: Any = [negative_prompt]
            elif batch_size != len(__UpperCAmelCase):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`."""
                )
            else:
                lowerCAmelCase__: List[Any] = negative_prompt

            lowerCAmelCase__: List[str] = text_input_ids.shape[-1]
            lowerCAmelCase__: Union[str, Any] = self.tokenizer(
                __UpperCAmelCase,
                padding="""max_length""",
                max_length=__UpperCAmelCase,
                truncation=__UpperCAmelCase,
                return_tensors="""pt""",
            )
            lowerCAmelCase__: Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowerCAmelCase__: Optional[int] = uncond_embeddings.shape[1]
            lowerCAmelCase__: int = uncond_embeddings.repeat(1, __UpperCAmelCase, 1)
            lowerCAmelCase__: Any = uncond_embeddings.view(batch_size * num_images_per_prompt, __UpperCAmelCase, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCAmelCase__: List[Any] = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowerCAmelCase__: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        lowerCAmelCase__: Tuple = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                lowerCAmelCase__: Dict = torch.randn(__UpperCAmelCase, generator=__UpperCAmelCase, device="""cpu""", dtype=__UpperCAmelCase).to(
                    self.device
                )
            else:
                lowerCAmelCase__: int = torch.randn(__UpperCAmelCase, generator=__UpperCAmelCase, device=self.device, dtype=__UpperCAmelCase)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            lowerCAmelCase__: int = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(__UpperCAmelCase)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        lowerCAmelCase__: Any = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        lowerCAmelCase__: Any = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowerCAmelCase__: List[str] = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys())
        lowerCAmelCase__: int = {}
        if accepts_eta:
            lowerCAmelCase__: List[str] = eta

        # 3) denoising loop
        for i, t in enumerate(self.progress_bar(__UpperCAmelCase)):
            # expand the latents if we are doing classifier free guidance
            lowerCAmelCase__: str = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            lowerCAmelCase__: Any = self.scheduler.scale_model_input(__UpperCAmelCase, __UpperCAmelCase)

            # predict the noise residual
            lowerCAmelCase__: List[str] = self.unet(__UpperCAmelCase, __UpperCAmelCase, encoder_hidden_states=__UpperCAmelCase).sample

            # perform guidance
            if do_classifier_free_guidance:
                lowerCAmelCase__, lowerCAmelCase__: Tuple = noise_pred.chunk(2)
                lowerCAmelCase__: List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            lowerCAmelCase__: Dict = self.scheduler.step(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, **__UpperCAmelCase).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)

        # 4) decode latents to images (1/0.18215 is the SD VAE scaling factor)
        lowerCAmelCase__: Optional[int] = 1 / 0.1_8_2_1_5 * latents
        lowerCAmelCase__: Optional[int] = self.vae.decode(__UpperCAmelCase).sample

        lowerCAmelCase__: Any = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        lowerCAmelCase__: Any = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            lowerCAmelCase__: Dict = self.numpy_to_pil(__UpperCAmelCase)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=__UpperCAmelCase, nsfw_content_detected=__UpperCAmelCase)
37
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''), ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''), ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''), ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''), ('''input_blocks.0.0.weight''', '''conv_in.weight'''), ('''input_blocks.0.0.bias''', '''conv_in.bias'''), ('''out.0.weight''', '''conv_norm_out.weight'''), ('''out.0.bias''', '''conv_norm_out.bias'''), ('''out.2.weight''', '''conv_out.weight'''), ('''out.2.bias''', '''conv_out.bias'''), ] _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''in_layers.0''', '''norm1'''), ('''in_layers.2''', '''conv1'''), ('''out_layers.0''', '''norm2'''), ('''out_layers.3''', '''conv2'''), ('''emb_layers.1''', '''time_emb_proj'''), ('''skip_connection''', '''conv_shortcut'''), ] _lowerCAmelCase = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. 
for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks _lowerCAmelCase = F"""down_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 _lowerCAmelCase = F"""down_blocks.{i}.attentions.{j}.""" _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks _lowerCAmelCase = F"""up_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 _lowerCAmelCase = F"""up_blocks.{i}.attentions.{j}.""" _lowerCAmelCase = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.conv.""" _lowerCAmelCase = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0.""" _lowerCAmelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) _lowerCAmelCase = '''mid_block.attentions.0.''' _lowerCAmelCase = '''middle_block.1.''' unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): _lowerCAmelCase = F"""mid_block.resnets.{j}.""" _lowerCAmelCase = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Any = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: 
lowerCAmelCase__ : Optional[int] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: lowerCAmelCase__ : Any = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: lowerCAmelCase__ : List[Any] = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = v lowerCAmelCase__ : Tuple = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''nin_shortcut''', '''conv_shortcut'''), ('''norm_out''', '''conv_norm_out'''), ('''mid.attn_1.''', '''mid_block.attentions.0.'''), ] for i in range(4): # down_blocks have two resnets for j in range(2): _lowerCAmelCase = F"""encoder.down_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.""" _lowerCAmelCase = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0.""" _lowerCAmelCase = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): _lowerCAmelCase = F"""decoder.up_blocks.{i}.resnets.{j}.""" _lowerCAmelCase = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): _lowerCAmelCase = F"""mid_block.resnets.{i}.""" _lowerCAmelCase = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''norm.''', '''group_norm.'''), 
('''q.''', '''query.'''), ('''k.''', '''key.'''), ('''v.''', '''value.'''), ('''proj_out.''', '''proj_attn.'''), ] def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return w.reshape(*w.shape , 1 , 1 ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: lowerCAmelCase__ : str = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: lowerCAmelCase__ : Dict = v.replace(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[Any] = v lowerCAmelCase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()} lowerCAmelCase__ : Tuple = ["""q""", """k""", """v""", """proj_out"""] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"""mid.attn_1.{weight_name}.weight""" in k: print(f"""Reshaping {k} for SD format""" ) lowerCAmelCase__ : Optional[int] = reshape_weight_for_sd(UpperCamelCase ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# _lowerCAmelCase = [ # (stable-diffusion, HF Diffusers) ('''resblocks.''', '''text_model.encoder.layers.'''), ('''ln_1''', '''layer_norm1'''), ('''ln_2''', '''layer_norm2'''), ('''.c_fc.''', '''.fc1.'''), ('''.c_proj.''', '''.fc2.'''), ('''.attn''', '''.self_attn'''), ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''), ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''), ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''), ] _lowerCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} _lowerCAmelCase = re.compile('''|'''.join(protected.keys())) # Ordering is from 
https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp _lowerCAmelCase = {'''q''': 0, '''k''': 1, '''v''': 2} def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = {} lowerCAmelCase__ : int = {} lowerCAmelCase__ : List[Any] = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): lowerCAmelCase__ : Optional[int] = k[: -len(""".q_proj.weight""" )] lowerCAmelCase__ : Tuple = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: lowerCAmelCase__ : List[Any] = [None, None, None] lowerCAmelCase__ : Dict = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): lowerCAmelCase__ : str = k[: -len(""".q_proj.bias""" )] lowerCAmelCase__ : List[str] = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: lowerCAmelCase__ : Union[str, Any] = [None, None, None] lowerCAmelCase__ : Any = v continue lowerCAmelCase__ : Dict = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCAmelCase__ : Any = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : Tuple = torch.cat(UpperCamelCase ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) lowerCAmelCase__ : str = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase ) lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase ) return new_state_dict def 
def convert_text_enc_state_dict(text_enc_dict):
    """v1 (CLIP) text-encoder keys already match the SD layout — pass through."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
37
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase_: '''simple docstring''' @staticmethod def UpperCAmelCase_ ( *__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple: pass @is_pipeline_test @require_vision @require_timm @require_torch class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' __lowercase : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict: lowerCAmelCase__ : Optional[int] = ObjectDetectionPipeline(model=__UpperCAmelCase ,image_processor=__UpperCAmelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ,threshold=0.0 ) self.assertGreater(len(__UpperCAmelCase ) ,0 ) for detected_object in outputs: self.assertEqual( __UpperCAmelCase ,{ """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } ,) import datasets lowerCAmelCase__ : Any = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" ,"""image""" ,split="""test""" ) lowerCAmelCase__ : List[str] = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # 
L dataset[2]["""file"""], ] lowerCAmelCase__ : List[str] = object_detector(__UpperCAmelCase ,threshold=0.0 ) self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) ) for outputs in batch_outputs: self.assertGreater(len(__UpperCAmelCase ) ,0 ) for detected_object in outputs: self.assertEqual( __UpperCAmelCase ,{ """score""": ANY(__UpperCAmelCase ), """label""": ANY(__UpperCAmelCase ), """box""": {"""xmin""": ANY(__UpperCAmelCase ), """ymin""": ANY(__UpperCAmelCase ), """xmax""": ANY(__UpperCAmelCase ), """ymax""": ANY(__UpperCAmelCase )}, } ,) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def UpperCAmelCase_ ( self ) -> Dict: pass @require_torch def UpperCAmelCase_ ( self ) -> Dict: lowerCAmelCase__ : str = """hf-internal-testing/tiny-detr-mobilenetsv3""" lowerCAmelCase__ : List[Any] = AutoModelForObjectDetection.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : str = ObjectDetectionPipeline(model=__UpperCAmelCase ,feature_extractor=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=0.0 ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] ,) lowerCAmelCase__ : List[Any] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ,threshold=0.0 ,) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": 
"""LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] ,) @require_torch @slow def UpperCAmelCase_ ( self ) -> List[str]: lowerCAmelCase__ : Dict = """facebook/detr-resnet-50""" lowerCAmelCase__ : int = AutoModelForObjectDetection.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = AutoFeatureExtractor.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : int = ObjectDetectionPipeline(model=__UpperCAmelCase ,feature_extractor=__UpperCAmelCase ) lowerCAmelCase__ : int = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] ,) lowerCAmelCase__ : Optional[int] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, 
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] ,) @require_torch @slow def UpperCAmelCase_ ( self ) -> List[Any]: lowerCAmelCase__ : List[Any] = """facebook/detr-resnet-50""" lowerCAmelCase__ : Optional[int] = pipeline("""object-detection""" ,model=__UpperCAmelCase ) lowerCAmelCase__ : Dict = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, 
"""label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] ,) lowerCAmelCase__ : Optional[Any] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] ,) @require_torch @slow def UpperCAmelCase_ ( self ) -> int: lowerCAmelCase__ : Optional[int] = 0.9_9_8_5 lowerCAmelCase__ : Optional[Any] = """facebook/detr-resnet-50""" lowerCAmelCase__ : 
Optional[int] = pipeline("""object-detection""" ,model=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=__UpperCAmelCase ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] ,) @require_torch @require_pytesseract @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ : int = """Narsil/layoutlmv3-finetuned-funsd""" lowerCAmelCase__ : Union[str, Any] = 0.9_9_9_3 lowerCAmelCase__ : int = pipeline("""object-detection""" ,model=__UpperCAmelCase ,threshold=__UpperCAmelCase ) lowerCAmelCase__ : Any = object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(__UpperCAmelCase ,decimals=4 ) ,[ {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] ,)
37
"""BLEURT metric for the `datasets` library (wraps google-research/bleurt)."""
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.

Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase_(datasets.Metric):
    """BLEURT metric: downloads the requested checkpoint and scores candidates."""

    def _info(self):
        # Metadata consumed by the `datasets` metric machinery.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        # elif chain (not a second `if`) so the "default" branch above is not
        # re-tested against CHECKPOINT_URLS and sent to the KeyError below.
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"""
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        # BleurtScorer expects (references, candidates); returns one float per pair.
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
37
1
"""LXMERT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class lowerCAmelCase_(PretrainedConfig):
    """Configuration class for LXMERT models.

    Restores the real attribute/parameter names (`model_type`, `attribute_map`,
    keyword parameters) that the `PretrainedConfig` machinery reads; the
    obfuscated original declared duplicate `__lowercase` class attributes and
    duplicate `__UpperCAmelCase` parameters, which is a SyntaxError.
    """

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Per-modality layer counts, keyed the way downstream LXMERT code expects.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
37
"""Conversational pipeline: a `Conversation` container plus the pipeline that drives it."""
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """Holds one conversation: past user inputs, generated responses, and the
    not-yet-processed new user input.

    The obfuscated original named both classes in this module identically (the
    second clobbered the first) and called the nonexistent ``uuid.uuida()``;
    both are fixed here.
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            # Fresh random identifier for this conversation.
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        # Same UUID means same conversation; otherwise compare full content.
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Queue `text` as the next user input; refuse (or overwrite) if one is pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\"."""
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"""
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        """Record a model response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs in chronological order, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"""{name} >> {text} \n"""
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline operating on `Conversation` objects."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # Generation needs a pad token; fall back to EOS.
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        # Single-conversation call returns the bare Conversation, not a list.
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                """Add user inputs with the conversation's `add_user_input` method"""
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            # Trim from the left so the newest tokens are kept.
            logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start with the decoder start token.
            start_position = 1
        else:
            # Decoder-only models echo the prompt; strip it.
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback tokenization: join turns with EOS tokens, truncate from the left."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
37
1
"""PEGASUS model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class lowerCAmelCase_(PretrainedConfig):
    """Configuration class for PEGASUS models.

    Restores the real attribute/parameter names the `PretrainedConfig`
    machinery reads; the obfuscated original declared duplicate class
    attributes and duplicate parameters (a SyntaxError) and lost the
    ``num_hidden_layers`` binding.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
37
"""Zero-shot image classification pipeline (CLIP-style image/text matching)."""
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase_(Pipeline):
    """Score an image against free-text candidate labels with a CLIP-like model.

    Fixes vs the obfuscated original: all hook methods were named identically
    (so the Pipeline base class could never find `preprocess`/`_forward`/
    `postprocess`), and the sort key lambda referenced an undefined name.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")

        # Highest score first; the lambda parameter must be the name used in
        # its body (the original referenced an undefined `x`).
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
37
1
"""Multi-node SageMaker training integration test.

Only runs when the ``TEST_SAGEMAKER`` environment variable is ``"True"``;
parameterized over framework/script/instance-type configurations.
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class lowerCAmelCase_(unittest.TestCase):
    """End-to-end distributed training smoke test against live SageMaker."""

    # NOTE: methods below must keep distinct, meaningful names — the previous
    # version named them all ``UpperCAmelCase_`` so they shadowed each other
    # and pytest collected nothing.
    def setUp(self):
        # The pytorch glue script lives in the examples tree; copy it into the
        # job's source dir so the estimator can upload it.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for ``instance_count`` nodes."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: run_ddp.py drives torch DDP itself, every
        # other script uses SageMaker's smdistributed data parallelism.
        distribution = (
            {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        )

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.distributed_hyperparameters,
                "model_name_or_path": self.model_name_or_path,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test sources."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump(
                {"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile
            )
37
"""Multi-node SageMaker training integration test (duplicate chunk).

Only runs when the ``TEST_SAGEMAKER`` environment variable is ``"True"``;
parameterized over framework/script/instance-type configurations.
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class lowerCAmelCase_(unittest.TestCase):
    """End-to-end distributed training smoke test against live SageMaker."""

    # NOTE: methods below must keep distinct, meaningful names — the previous
    # version named them all ``UpperCAmelCase_`` so they shadowed each other
    # and pytest collected nothing.
    def setUp(self):
        # The pytorch glue script lives in the examples tree; copy it into the
        # job's source dir so the estimator can upload it.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for ``instance_count`` nodes."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings: run_ddp.py drives torch DDP itself, every
        # other script uses SageMaker's smdistributed data parallelism.
        distribution = (
            {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        )

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.distributed_hyperparameters,
                "model_name_or_path": self.model_name_or_path,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics next to the test sources."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump(
                {"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile
            )
37
1