code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _snake_case : def __init__( self : Optional[int], __lowercase : Any, __lowercase : Optional[int]=13, __lowercase : Tuple=7, __lowercase : List[Any]=True, __lowercase : int=True, __lowercase : Dict=True, __lowercase : List[Any]=True, __lowercase : Optional[int]=99, __lowercase : Tuple=32, __lowercase : Dict=2, __lowercase : Any=4, __lowercase : Dict=37, __lowercase : List[Any]="gelu", __lowercase : str=0.1, __lowercase : Optional[int]=0.1, __lowercase : int=512, __lowercase : Any=16, __lowercase : List[str]=2, __lowercase : Union[str, Any]=0.02, __lowercase : str=3, __lowercase : str=4, __lowercase : Tuple=None, __lowercase : List[str]=1000, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope lowercase__ = 
range_bbox def A__ ( self : Dict ): lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) # convert bbox to numpy since TF does not support item assignment lowercase__ = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase__ = bbox[i, j, 3] lowercase__ = bbox[i, j, 1] lowercase__ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase__ = bbox[i, j, 2] lowercase__ = bbox[i, j, 0] lowercase__ = t lowercase__ = tf.convert_to_tensor(__lowercase ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowercase__ = ids_tensor([self.batch_size], self.num_choices ) lowercase__ = LayoutLMConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self : List[Any], __lowercase : List[Any], __lowercase : Any, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Union[str, Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : 
List[str] ): lowercase__ = TFLayoutLMModel(config=__lowercase ) lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase ) lowercase__ = model(__lowercase, __lowercase, token_type_ids=__lowercase ) lowercase__ = model(__lowercase, __lowercase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def A__ ( self : List[Any], __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : Tuple, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : str, __lowercase : List[Any] ): lowercase__ = TFLayoutLMForMaskedLM(config=__lowercase ) lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self : str, __lowercase : int, __lowercase : int, __lowercase : Optional[Any], __lowercase : int, __lowercase : List[str], __lowercase : List[Any], __lowercase : int, __lowercase : str ): lowercase__ = self.num_labels lowercase__ = TFLayoutLMForSequenceClassification(config=__lowercase ) lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def A__ ( self : Any, __lowercase : List[str], __lowercase : List[Any], __lowercase : str, __lowercase : int, __lowercase : Tuple, __lowercase : Tuple, __lowercase : Any, __lowercase : Dict ): lowercase__ = self.num_labels lowercase__ = TFLayoutLMForTokenClassification(config=__lowercase ) lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def 
A__ ( self : Union[str, Any], __lowercase : int, __lowercase : List[str], __lowercase : int, __lowercase : Optional[int], __lowercase : Dict, __lowercase : Tuple, __lowercase : Any, __lowercase : int ): lowercase__ = TFLayoutLMForQuestionAnswering(config=__lowercase ) lowercase__ = model(__lowercase, __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def A__ ( self : int ): lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[Any] =( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) UpperCamelCase__ : Tuple =( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase__ : int =False UpperCamelCase__ : Union[str, Any] =True UpperCamelCase__ : str =1_0 def A__ ( self : Optional[Any] ): lowercase__ = TFLayoutLMModelTester(self ) lowercase__ = ConfigTester(self, config_class=__lowercase, hidden_size=37 ) def A__ ( self : List[str] ): self.config_tester.run_common_tests() def A__ ( self : List[str] ): lowercase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*__lowercase ) def A__ ( self : Tuple ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowercase ) def A__ ( self : Dict ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowercase ) def A__ ( self : Tuple ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowercase ) def A__ ( self : Optional[int] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowercase ) @slow def A__ ( self : Optional[Any] ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = TFLayoutLMModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) @unittest.skip("Onnx compliancy broke with TF 2.10" ) def A__ ( self : Dict ): pass def __lowerCAmelCase ( ): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off lowercase__ = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 lowercase__ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 lowercase__ = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 lowercase__ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) lowercase__ = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _snake_case ( unittest.TestCase): @slow def A__ ( self : str ): lowercase__ = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs() # forward pass lowercase__ = model(input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase ) # test the sequence output on [0, :3, :3] lowercase__ = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __lowercase, atol=1e-3 ) ) # test the pooled output on [1, :3] lowercase__ = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3], __lowercase, atol=1e-3 ) ) @slow def A__ ( self : Optional[Any] ): # initialize model with randomly initialized sequence classification head lowercase__ = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2 ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs() # forward pass lowercase__ = model( input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=tf.convert_to_tensor([1, 1] ), ) # test whether we get a loss as a scalar lowercase__ = outputs.loss lowercase__ = (2,) self.assertEqual(loss.shape, __lowercase ) # test the shape of the logits lowercase__ = outputs.logits lowercase__ = (2, 2) self.assertEqual(logits.shape, __lowercase ) @slow def A__ ( self : List[str] ): # initialize model with randomly initialized 
token classification head lowercase__ = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13 ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs() # forward pass lowercase__ = model( input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase ) # test the shape of the logits lowercase__ = outputs.logits lowercase__ = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape, __lowercase ) @slow def A__ ( self : Optional[Any] ): # initialize model with randomly initialized token classification head lowercase__ = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = prepare_layoutlm_batch_inputs() # forward pass lowercase__ = model(input_ids=__lowercase, bbox=__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase ) # test the shape of the logits lowercase__ = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape, __lowercase ) self.assertEqual(outputs.end_logits.shape, __lowercase )
37
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Dict =TransfoXLTokenizer UpperCamelCase__ : List[Any] =False UpperCamelCase__ : List[Any] =False def A__ ( self : Union[str, Any] ): super().setUp() lowercase__ = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def A__ ( self : Union[str, Any], **__lowercase : Any ): lowercase__ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase ) def A__ ( self : Tuple, __lowercase : Optional[int] ): lowercase__ = "<unk> UNwanted , running" lowercase__ = "<unk> unwanted, running" return input_text, output_text def A__ ( self : str ): lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase ) lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" ) self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] ) def A__ ( self : Tuple ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["hello", "!", "how", "are", "you", "?"] ) def A__ ( self : Tuple ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def A__ ( self : str ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" 
lowercase__ = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase ) self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase ) def A__ ( self : List[str] ): lowercase__ = self.get_tokenizer() lowercase__ = len(__lowercase ) tokenizer.add_tokens(["new1", "new2"] ) tokenizer.move_added_token("new1", 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(__lowercase ), original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1" ), [1] ) self.assertEqual(tokenizer.decode([1] ), "new1" )
37
1
from collections import defaultdict from math import gcd def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 150_0000 ): lowercase__ = defaultdict(SCREAMING_SNAKE_CASE_ ) lowercase__ = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , SCREAMING_SNAKE_CASE_ , 2 ): if gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) > 1: continue lowercase__ = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(SCREAMING_SNAKE_CASE_ , limit + 1 , SCREAMING_SNAKE_CASE_ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'{solution() = }')
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowerCAmelCase ( ): lowercase__ = HfArgumentParser(SCREAMING_SNAKE_CASE_ ) lowercase__ = parser.parse_args_into_dataclasses()[0] lowercase__ = TensorFlowBenchmark(args=SCREAMING_SNAKE_CASE_ ) try: lowercase__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase__ = "Arg --no_{0} is no longer used, please use --no-{0} instead." lowercase__ = " ".join(str(SCREAMING_SNAKE_CASE_ ).split(" " )[:-1] ) lowercase__ = "" lowercase__ = eval(str(SCREAMING_SNAKE_CASE_ ).split(" " )[-1] ) lowercase__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: lowercase__ = full_error_msg + begin_error_msg + str(SCREAMING_SNAKE_CASE_ ) raise ValueError(SCREAMING_SNAKE_CASE_ ) benchmark.run() if __name__ == "__main__": main()
37
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 100_0000 ): lowercase__ = limit + 1 lowercase__ = [0] * limit for first_term in range(1 , SCREAMING_SNAKE_CASE_ ): for n in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a lowercase__ = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowercase_ = """<<<<<<< This should probably be modified because it mentions: """ lowercase_ = """======= >>>>>>> """ lowercase_ = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] lowercase_ = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class _snake_case ( lowercase__): @staticmethod def A__ ( __lowercase : ArgumentParser ): lowercase__ = parser.add_parser( "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", ) train_parser.add_argument( "--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", ) train_parser.add_argument( "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." 
) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ): lowercase__ = get_logger("datasets-cli/converting" ) lowercase__ = tfds_path lowercase__ = datasets_directory def A__ ( self : Any ): if os.path.isdir(self._tfds_path ): lowercase__ = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__ = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." ) lowercase__ = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) lowercase__ = [] lowercase__ = [] lowercase__ = {} if os.path.isdir(self._tfds_path ): lowercase__ = os.listdir(__lowercase ) else: lowercase__ = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(__lowercase, encoding="utf-8" ) as f: lowercase__ = f.readlines() lowercase__ = [] lowercase__ = False lowercase__ = False lowercase__ = [] for line in lines: lowercase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__ = "import datasets\n" elif "import tensorflow" in out_line: # order is important here lowercase__ = "" continue elif "from absl import logging" in out_line: lowercase__ = "from datasets import logging\n" elif "getLogger" in out_line: lowercase__ = out_line.replace("getLogger", "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__ 
= True lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" ) out_lines.append(__lowercase ) out_lines.append(__lowercase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__ = re.sub(__lowercase, __lowercase, __lowercase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) lowercase__ = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__ = True out_lines.append(__lowercase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__ = f_name.replace(".py", "" ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) os.makedirs(__lowercase, exist_ok=__lowercase ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__lowercase ) if needs_manual_update: with_manual_update.append(__lowercase ) with open(__lowercase, "w", encoding="utf-8" ) as f: f.writelines(__lowercase ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: lowercase__ = os.path.basename(__lowercase ) lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(__lowercase, __lowercase ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. 
Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
from __future__ import annotations from dataclasses import dataclass @dataclass class _snake_case : UpperCamelCase__ : float UpperCamelCase__ : TreeNode | None =None UpperCamelCase__ : TreeNode | None =None def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): # Validation def is_valid_tree(SCREAMING_SNAKE_CASE_ ) -> bool: if node is None: return True if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(SCREAMING_SNAKE_CASE_ ): raise ValueError( "Each node should be type of TreeNode and data should be float." ) def is_binary_search_tree_recursive_check( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , SCREAMING_SNAKE_CASE_ , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , SCREAMING_SNAKE_CASE_ ) ) return is_binary_search_tree_recursive_check(SCREAMING_SNAKE_CASE_ , -float("inf" ) , float("inf" ) ) if __name__ == "__main__": import doctest doctest.testmod()
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } lowercase_ = { """allenai/led-base-16384""": 1_6384, } class _snake_case ( lowercase__): UpperCamelCase__ : int =VOCAB_FILES_NAMES UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : List[Any] =LEDTokenizer UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ): super().__init__( __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, 
add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) ) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**__lowercase ) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = "post_processor" lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase ) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state["sep"] ) if "cls" in state: lowercase__ = tuple(state["cls"] ) lowercase__ = False if state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get("trim_offsets", __lowercase ) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(__lowercase, state.pop("type" ) ) lowercase__ = component_class(**__lowercase ) setattr(self.backend_tokenizer, __lowercase, __lowercase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def A__ ( self : str ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def A__ ( self : Optional[int], __lowercase : Dict ): lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value lowercase__ = value def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowercase, **__lowercase ) def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*__lowercase, **__lowercase ) def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ): lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase ) return tuple(__lowercase ) def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ): lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ): lowercase__ = super()._pad( encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, ) # Load from model defaults if return_attention_mask is None: lowercase__ = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase__ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase ) if needs_to_be_padded: lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase__ = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowercase__ = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
import re

from filelock import FileLock

try:
    import nltk

    # Bug fix: the original bound this flag to an obfuscated name
    # (`lowercase_`), leaving `NLTK_AVAILABLE` — which is read below —
    # undefined at module scope.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the one-time download of the punkt tokenizer data across
    # concurrent processes.
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_):
    """Return the input text re-split into sentences, one sentence per line.

    Strips Pegasus' ``<n>`` newline markers first, then joins the sentences
    produced by nltk's punkt tokenizer with real newlines. Raises
    ``AssertionError`` if nltk is not installed.
    """
    # Bug fix: `re.sub` returns a new string (str is immutable); the original
    # called it and discarded the result, so `<n>` markers were never removed.
    SCREAMING_SNAKE_CASE_ = re.sub("<n>", "", SCREAMING_SNAKE_CASE_)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE_))
37
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def __lowerCAmelCase():
    """Entry point for the ``accelerate`` command-line tool.

    Builds the top-level argument parser, registers every sub-command
    (config/env/launch/tpu/test), parses ``sys.argv`` and dispatches to the
    chosen sub-command's ``func`` handler; prints help and exits with
    status 1 when no sub-command was given.
    """
    # Bug fix: the original passed `allow_abbrev=SCREAMING_SNAKE_CASE_`, a
    # name undefined at module scope (NameError on call). `False` disables
    # argparse's prefix-matching of option names.
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands (bug fix: the original assigned the parser objects to
    # the obfuscated name `lowercase__` while reading `parser`/`subparsers`).
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No sub-command on the command line: show usage and fail.
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    # Bug fix: the original guard called an undefined `main()`; dispatch to
    # the entry-point function defined above.
    __lowerCAmelCase()
37
1
# Spanish national ID (DNI) check-letter validation: the check letter for an
# 8-digit number is LOOKUP_LETTERS[number % 23].
#
# Bug fix: the original bound BOTH constants below to the same obfuscated
# name `lowercase_` (the second overwriting the first), while the function
# indexed `LOOKUP_LETTERS` — an undefined name, so every call raised
# NameError. Naming the constants restores the intended behavior.
ERROR_MESSAGE = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_):
    """Return True iff the argument is a valid Spanish DNI (8 digits + letter).

    Accepts an optional dash between the number and the letter and is
    case-insensitive.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the cleaned id is not 9 characters, the first 8
            characters are not digits, or the check character is a digit.
    """
    if not isinstance(SCREAMING_SNAKE_CASE_, str):
        raise TypeError(f'''Expected string as input, found {type(SCREAMING_SNAKE_CASE_).__name__}''')
    # Normalize: strip dashes, uppercase the check letter.
    spanish_id_clean = SCREAMING_SNAKE_CASE_.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        # Bug fix: raise the descriptive message instead of echoing the input.
        raise ValueError(ERROR_MESSAGE)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MESSAGE) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MESSAGE)
    return letter == LOOKUP_LETTERS[number % 23]


# Additive, readable public alias (the name-mangled original is skipped by
# `from module import *`); existing callers are unaffected.
is_spain_national_id = __lowerCAmelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ): lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {"height": 18, "width": 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def A__ ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None def A__ ( self : str ): lowercase__ = DonutImageProcessingTester(self ) @property def A__ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_resize" ) ) self.assertTrue(hasattr(__lowercase, "size" ) ) self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) ) self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) ) self.assertTrue(hasattr(__lowercase, "do_pad" ) ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "image_mean" ) ) self.assertTrue(hasattr(__lowercase, "image_std" ) ) def A__ ( self : str ): lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"height": 18, "width": 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) ) self.assertEqual(image_processor.size, {"height": 84, "width": 42} ) def A__ ( self : List[str] ): pass @is_flaky() def A__ ( self : Dict ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Optional[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Tuple ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
37
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = "huggingface/label-files" lowercase__ = "imagenet-1k-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase__ = BitConfig( conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , ) return config def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if "stem.conv" in name: lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: lowercase__ = name.replace("blocks" , "layers" ) if "head.fc" in name: lowercase__ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): lowercase__ = "bit." + name if "bit" not in name and "classifier" not in name: lowercase__ = "bit.encoder." 
+ name return name def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = get_config(SCREAMING_SNAKE_CASE_ ) # load original model from timm lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() # load state_dict of original model lowercase__ = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val.squeeze() if "head" in key else val # load HuggingFace model lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # create image processor lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) ) lowercase__ = transform.transforms lowercase__ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } lowercase__ = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase__ = prepare_img() lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # verify logits with torch.no_grad(): lowercase__ = model(SCREAMING_SNAKE_CASE_ ) lowercase__ = 
outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) lowercase_ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _snake_case ( lowercase__):
    # Code-quality checks over every dataset script under ./datasets:
    # open() must pass an explicit encoding, and bare print() is forbidden.
    # NOTE(review): the base class name `lowercase__` is not defined at module
    # scope in this chunk — presumably an obfuscated reference to `TestCase`
    # imported above; confirm before running.
    # NOTE(review): all four methods below share the obfuscated name `A__`;
    # in a Python class body later definitions shadow earlier ones, so only
    # the last `A__` survives on the class as written.

    def A__ ( self : Optional[Any], __lowercase : str ):
        # Scan one file for `open(...)` calls without an explicit mode/encoding
        # and return the first regex match (None when the file is clean).
        # NOTE(review): assignments bind `lowercase__` while the code reads
        # `regexp`/`match` — obfuscation artifact; broken as written.
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            lowercase__ = input_file.read()
            lowercase__ = regexp.search(__lowercase )
            return match

    def A__ ( self : str, __lowercase : str ):
        # Scan one file for `print(` calls that are NOT inside a comment,
        # string or docstring; return the first real match, else None.
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
            lowercase__ = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowercase__ = regexp.finditer(__lowercase )
            # Keep only matches where group 1 (the bare `print(`) participated.
            lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    def A__ ( self : Union[str, Any] ):
        # Assert every dataset script under ./datasets opens files with
        # an explicit utf-8 encoding.
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowercase ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )

    def A__ ( self : Union[str, Any] ):
        # Assert no dataset script under ./datasets uses bare print().
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowercase ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
37
1
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowercase_ = get_tests_dir("""fixtures""") lowercase_ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") lowercase_ = get_tests_dir("""fixtures/dummy-config.json""") class _snake_case ( unittest.TestCase): def A__ ( self : int ): lowercase__ = 0 def A__ ( self : Any ): lowercase__ = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(__lowercase, __lowercase ) def A__ ( self : List[str] ): lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase, __lowercase ) def A__ ( self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase ).to_dict() config_dict.pop("feature_extractor_type" ) lowercase__ = WavaVecaFeatureExtractor(**__lowercase ) # save in new folder model_config.save_pretrained(__lowercase ) config.save_pretrained(__lowercase ) lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase ) # make sure private variable is not incorrectly saved lowercase__ = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(__lowercase, __lowercase ) def A__ ( self : Tuple ): lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase ) 
self.assertIsInstance(__lowercase, __lowercase ) def A__ ( self : Optional[int] ): with self.assertRaisesRegex( __lowercase, "bert-base is not a local folder and is not a valid model identifier" ): lowercase__ = AutoFeatureExtractor.from_pretrained("bert-base" ) def A__ ( self : int ): with self.assertRaisesRegex( __lowercase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase, revision="aaaaaa" ) def A__ ( self : Tuple ): with self.assertRaisesRegex( __lowercase, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ): lowercase__ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" ) def A__ ( self : Optional[Any] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowercase ): lowercase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowercase ): lowercase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=__lowercase ) lowercase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=__lowercase ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor" ) # Test feature extractor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__lowercase ) lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase, trust_remote_code=__lowercase ) self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor" ) def A__ ( self : List[Any] ): try: AutoConfig.register("custom", __lowercase ) AutoFeatureExtractor.register(__lowercase, __lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowercase ): AutoFeatureExtractor.register(__lowercase, __lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase__ = CustomFeatureExtractor.from_pretrained(__lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__lowercase ) lowercase__ = AutoFeatureExtractor.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase, __lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def A__ ( self : Union[str, Any] ): class _snake_case ( lowercase__): UpperCamelCase__ : List[str] =True try: AutoConfig.register("custom", __lowercase ) AutoFeatureExtractor.register(__lowercase, __lowercase ) # If remote code is not set, the default is to use local lowercase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. 
lowercase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=__lowercase ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub lowercase__ = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=__lowercase ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor" ) self.assertTrue(not hasattr(__lowercase, "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
37
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
1
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : def __init__( self : Any, __lowercase : Union[str, Any], __lowercase : List[Any]=13, __lowercase : Tuple=7, __lowercase : List[Any]=True, __lowercase : Any=True, __lowercase : str=True, __lowercase : str=True, __lowercase : Union[str, Any]=True, __lowercase : List[Any]=False, __lowercase : List[Any]=False, __lowercase : List[str]=False, __lowercase : Optional[Any]=2, __lowercase : Any=99, __lowercase : Optional[Any]=0, __lowercase : Any=32, __lowercase : List[Any]=5, __lowercase : List[str]=4, __lowercase : Tuple=0.1, __lowercase : int=0.1, __lowercase : str=512, __lowercase : List[Any]=2, __lowercase : List[Any]=0.02, __lowercase : List[Any]=2, __lowercase : Any=4, __lowercase : str="last", __lowercase : Optional[int]=True, __lowercase : Any=None, __lowercase : Tuple=0, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_lengths lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = gelu_activation lowercase__ = sinusoidal_embeddings lowercase__ = causal lowercase__ = asm lowercase__ = n_langs lowercase__ = vocab_size lowercase__ = n_special lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = 
hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = summary_type lowercase__ = use_proj lowercase__ = scope lowercase__ = bos_token_id def A__ ( self : int ): lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_input_lengths: lowercase__ = ( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowercase__ = ids_tensor([self.batch_size], 2 ).float() lowercase__ = ids_tensor([self.batch_size], self.num_choices ) lowercase__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def A__ ( self : int ): return XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, ) def A__ ( self : List[str], __lowercase : Optional[int], 
__lowercase : int, __lowercase : str, __lowercase : Union[str, Any], __lowercase : List[str], __lowercase : str, __lowercase : Tuple, __lowercase : Dict, __lowercase : int, ): lowercase__ = XLMModel(config=__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase, lengths=__lowercase, langs=__lowercase ) lowercase__ = model(__lowercase, langs=__lowercase ) lowercase__ = model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self : Tuple, __lowercase : Dict, __lowercase : Optional[int], __lowercase : Tuple, __lowercase : List[Any], __lowercase : List[Any], __lowercase : Any, __lowercase : Dict, __lowercase : Optional[Any], __lowercase : Dict, ): lowercase__ = XLMWithLMHeadModel(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase, token_type_ids=__lowercase, labels=__lowercase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self : Optional[Any], __lowercase : Tuple, __lowercase : List[str], __lowercase : str, __lowercase : List[str], __lowercase : Tuple, __lowercase : List[str], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : int, ): lowercase__ = XLMForQuestionAnsweringSimple(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase ) lowercase__ = model(__lowercase, start_positions=__lowercase, end_positions=__lowercase ) lowercase__ = outputs self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def A__ ( self : Optional[int], __lowercase : str, __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple, __lowercase : str, __lowercase : Any, __lowercase : Dict, __lowercase : Union[str, Any], __lowercase : Any, ): lowercase__ = 
XLMForQuestionAnswering(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase ) lowercase__ = model( __lowercase, start_positions=__lowercase, end_positions=__lowercase, cls_index=__lowercase, is_impossible=__lowercase, p_mask=__lowercase, ) lowercase__ = model( __lowercase, start_positions=__lowercase, end_positions=__lowercase, cls_index=__lowercase, is_impossible=__lowercase, ) ((lowercase__) , ) = result_with_labels.to_tuple() lowercase__ = model(__lowercase, start_positions=__lowercase, end_positions=__lowercase ) ((lowercase__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def A__ ( self : Optional[int], __lowercase : int, __lowercase : Optional[Any], __lowercase : Tuple, __lowercase : List[Any], __lowercase : Tuple, __lowercase : Dict, __lowercase : Dict, __lowercase : Optional[int], __lowercase : Any, ): lowercase__ = XLMForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase ) lowercase__ = model(__lowercase, labels=__lowercase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def A__ ( self : Optional[int], __lowercase : Tuple, __lowercase : Dict, __lowercase : Tuple, __lowercase : Optional[Any], __lowercase : Union[str, Any], __lowercase : int, __lowercase : Optional[Any], __lowercase : int, __lowercase : 
Optional[int], ): lowercase__ = self.num_labels lowercase__ = XLMForTokenClassification(__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = model(__lowercase, attention_mask=__lowercase, labels=__lowercase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : int, __lowercase : List[Any], __lowercase : List[str], __lowercase : Dict, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Dict, ): lowercase__ = self.num_choices lowercase__ = XLMForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowercase__ = model( __lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def A__ ( self : Optional[int] ): lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class _snake_case ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) UpperCamelCase__ : Union[str, Any] =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models 
whether language generation is also applicable UpperCamelCase__ : Union[str, Any] =( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def A__ ( self : Tuple, __lowercase : str, __lowercase : Optional[Any], __lowercase : Any, __lowercase : Optional[Any], __lowercase : Tuple ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def A__ ( self : str, __lowercase : str, __lowercase : Tuple, __lowercase : Optional[int]=False ): lowercase__ = super()._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase__ = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=__lowercase ) lowercase__ = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=__lowercase ) return inputs_dict def A__ ( self : Any ): lowercase__ = XLMModelTester(self ) lowercase__ = ConfigTester(self, config_class=__lowercase, emb_dim=37 ) def A__ ( self : Tuple ): self.config_tester.run_common_tests() def A__ ( self : Union[str, Any] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__lowercase ) def A__ ( self : List[str] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__lowercase ) def A__ ( self : 
Tuple ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__lowercase ) def A__ ( self : Any ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__lowercase ) def A__ ( self : Union[str, Any] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__lowercase ) def A__ ( self : Optional[int] ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__lowercase ) def A__ ( self : str ): lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowercase ) def A__ ( self : Optional[int], __lowercase : Optional[int], __lowercase : List[str], __lowercase : int, __lowercase : str, __lowercase : int, __lowercase : Any=False, __lowercase : Dict=1 ): self.assertIsInstance(__lowercase, __lowercase ) self.assertListEqual( [isinstance(__lowercase, __lowercase ) for iter_attentions in attentions], [True] * len(__lowercase ) ) self.assertEqual(len(__lowercase ), (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__lowercase ): # adds PAD dummy token lowercase__ = min_length + idx + 1 lowercase__ = min_length + idx + 1 lowercase__ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(__lowercase ) ) def A__ ( self : Optional[Any], __lowercase : str, __lowercase : List[str], __lowercase : Optional[int], __lowercase : List[str], __lowercase : Tuple, __lowercase : Optional[int]=False, __lowercase : int=1 ): self.assertIsInstance(__lowercase, __lowercase ) self.assertListEqual( [isinstance(__lowercase, __lowercase ) for iter_hidden_states in hidden_states], [True] * len(__lowercase ), ) 
self.assertEqual(len(__lowercase ), (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__lowercase ): # adds PAD dummy token lowercase__ = min_length + idx + 1 lowercase__ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(__lowercase ), ) pass @slow def A__ ( self : Dict ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = XLMModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) @require_torch class _snake_case ( unittest.TestCase): @slow def A__ ( self : List[str] ): lowercase__ = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(__lowercase ) lowercase__ = torch.tensor([[14, 447]], dtype=torch.long, device=__lowercase ) # the president lowercase__ = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase__ = model.generate(__lowercase, do_sample=__lowercase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist(), __lowercase )
37
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ): lowercase__ = size if size is not None else {"height": 20, "width": 20} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = size lowercase__ = do_normalize lowercase__ = do_convert_rgb lowercase__ = [512, 1024, 2048, 4096] lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16} def A__ ( self : List[str] ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def A__ ( self : Any ): lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Any ): 
lowercase__ = PixaStructImageProcessingTester(self ) @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Optional[int] ): lowercase__ = self.image_processor_tester.prepare_dummy_image() lowercase__ = self.image_processing_class(**self.image_processor_dict ) lowercase__ = 2048 lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) ) def A__ ( self : Union[str, Any] ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : int ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = 
prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 lowercase__ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowercase ): lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches lowercase__ = "Hello" lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Tuple ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase 
).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Any ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Optional[int] ): lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 ) lowercase__ = 3 @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Dict ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Union[str, Any] ): # Initialize 
image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
37
1
def euclidean_gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` (iterative Euclid).

    Repeatedly replaces (a, b) with (b, a mod b) until b is 0; the remaining
    ``a`` is the gcd. Works for b == 0 (returns a).
    """
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` (recursive Euclid)."""
    # Base case: gcd(a, 0) == a; otherwise recurse on (b, a mod b).
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    """Print sample gcd computations with both implementations."""
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
37
def depth_first_search(grid, row, col, visit):
    """Count distinct paths from (row, col) to the bottom-right cell of ``grid``.

    Moves are up/down/left/right; cells holding 1 are blocked, and a cell may
    not be revisited within a single path. ``visit`` tracks the current path
    and is restored before returning, so the caller's set is left unchanged.

    Returns:
        The number of simple paths reaching ``grid[-1][-1]``.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target cell: exactly one path ends here.
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))  # backtrack so sibling branches may reuse this cell
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class _snake_case ( unittest.TestCase): def __init__( self : Tuple, __lowercase : Any, __lowercase : List[Any]=13, __lowercase : Union[str, Any]=7, __lowercase : Tuple=True, __lowercase : int=True, __lowercase : List[str]=True, __lowercase : Any=True, __lowercase : Optional[Any]=99, __lowercase : List[Any]=32, __lowercase : Tuple=5, __lowercase : Optional[int]=4, __lowercase : Dict=37, __lowercase : str="gelu", __lowercase : List[str]=0.1, __lowercase : List[str]=0.1, __lowercase : Tuple=512, __lowercase : Tuple=16, __lowercase : List[str]=2, __lowercase : int=0.02, __lowercase : List[Any]=4, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_choices def A__ ( self : int ): lowercase__ = ids_tensor([self.batch_size, self.seq_length], 
self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowercase__ = RobertaPreLayerNormConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__lowercase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def A__ ( self : str ): lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def A__ ( self : Optional[Any] ): lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = True lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase__ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : int =True UpperCamelCase__ : Union[str, Any] =( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, 
FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def A__ ( self : Tuple ): lowercase__ = FlaxRobertaPreLayerNormModelTester(self ) @slow def A__ ( self : Tuple ): for model_class_name in self.all_model_classes: lowercase__ = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=__lowercase ) lowercase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowercase ) @require_flax class _snake_case ( unittest.TestCase): @slow def A__ ( self : str ): lowercase__ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=__lowercase ) lowercase__ = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.intaa ) lowercase__ = model(__lowercase )[0] lowercase__ = [1, 11, 5_0265] self.assertEqual(list(output.shape ), __lowercase ) # compare the actual values for a slice. lowercase__ = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3], __lowercase, atol=1e-4 ) ) @slow def A__ ( self : Union[str, Any] ): lowercase__ = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=__lowercase ) lowercase__ = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.intaa ) lowercase__ = model(__lowercase )[0] # compare the actual values for a slice. lowercase__ = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3], __lowercase, atol=1e-4 ) )
37
def is_unique(input_str):
    """Return True if no character occurs more than once in ``input_str``.

    Uses an arbitrary-precision int as a bitmap keyed by each character's
    Unicode code point, so it works for any characters, not just ASCII.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = 1 << ch_unicode  # the bit representing this character
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Serialize the one-time punkt download across concurrent workers.
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x):
    """Split ``x`` into sentences with NLTK and join them with newlines.

    Raises:
        AssertionError: if nltk is not installed.
    """
    # re.sub returns a new string; it must be assigned or the <n> markers survive.
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
37
def exchange_sort(numbers):
    """Sort ``numbers`` in place in ascending order and return it.

    Exchange sort: for each position i, swap with any later element that is
    smaller. O(n^2) comparisons; fine for small inputs, handles [] and [x].
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
37
1
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort lowercase_ = logging.get_logger(__name__) lowercase_ = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class _snake_case : def __init__( self : str, __lowercase : str=None, **__lowercase : Optional[int] ): logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." ) lowercase__ = model lowercase__ = kwargs.get("model_save_dir", __lowercase ) lowercase__ = kwargs.get("latest_model_name", __lowercase ) def __call__( self : Dict, **__lowercase : str ): lowercase__ = {k: np.array(__lowercase ) for k, v in kwargs.items()} return self.model.run(__lowercase, __lowercase ) @staticmethod def A__ ( __lowercase : Union[str, Path], __lowercase : Tuple=None, __lowercase : str=None ): if provider is None: logger.info("No onnxruntime provider specified, using CPUExecutionProvider" ) lowercase__ = "CPUExecutionProvider" return ort.InferenceSession(__lowercase, providers=[provider], sess_options=__lowercase ) def A__ ( self : Union[str, Any], __lowercase : Union[str, Path], __lowercase : Optional[str] = None, **__lowercase : int ): lowercase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME lowercase__ = self.model_save_dir.joinpath(self.latest_model_name ) lowercase__ = Path(__lowercase ).joinpath(__lowercase ) try: shutil.copyfile(__lowercase, __lowercase ) except shutil.SameFileError: pass # copy external weights 
(for models >2GB) lowercase__ = self.model_save_dir.joinpath(__lowercase ) if src_path.exists(): lowercase__ = Path(__lowercase ).joinpath(__lowercase ) try: shutil.copyfile(__lowercase, __lowercase ) except shutil.SameFileError: pass def A__ ( self : List[Any], __lowercase : Union[str, os.PathLike], **__lowercase : Dict, ): if os.path.isfile(__lowercase ): logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''' ) return os.makedirs(__lowercase, exist_ok=__lowercase ) # saving model weights/files self._save_pretrained(__lowercase, **__lowercase ) @classmethod def A__ ( cls : Tuple, __lowercase : Union[str, Path], __lowercase : Optional[Union[bool, str, None]] = None, __lowercase : Optional[Union[str, None]] = None, __lowercase : bool = False, __lowercase : Optional[str] = None, __lowercase : Optional[str] = None, __lowercase : Optional[str] = None, __lowercase : Optional["ort.SessionOptions"] = None, **__lowercase : List[str], ): lowercase__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(__lowercase ): lowercase__ = OnnxRuntimeModel.load_model( os.path.join(__lowercase, __lowercase ), provider=__lowercase, sess_options=__lowercase ) lowercase__ = Path(__lowercase ) # load model from hub else: # download model lowercase__ = hf_hub_download( repo_id=__lowercase, filename=__lowercase, use_auth_token=__lowercase, revision=__lowercase, cache_dir=__lowercase, force_download=__lowercase, ) lowercase__ = Path(__lowercase ).parent lowercase__ = Path(__lowercase ).name lowercase__ = OnnxRuntimeModel.load_model(__lowercase, provider=__lowercase, sess_options=__lowercase ) return cls(model=__lowercase, **__lowercase ) @classmethod def A__ ( cls : Dict, __lowercase : Union[str, Path], __lowercase : bool = True, __lowercase : Optional[str] = None, __lowercase : Optional[str] = None, **__lowercase : Union[str, Any], ): lowercase__ = None if len(str(__lowercase ).split("@" ) ) == 2: 
lowercase__ , lowercase__ = model_id.split("@" ) return cls._from_pretrained( model_id=__lowercase, revision=__lowercase, cache_dir=__lowercase, force_download=__lowercase, use_auth_token=__lowercase, **__lowercase, )
37
def catalan_numbers(upper_limit):
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming.

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
37
1
from __future__ import annotations import queue class _snake_case : def __init__( self : Optional[Any], __lowercase : Union[str, Any] ): lowercase__ = data lowercase__ = None lowercase__ = None def __lowerCAmelCase ( ): print("\n********Press N to stop entering at any point of time********\n" ) lowercase__ = input("Enter the value of the root node: " ).strip().lower() lowercase__ = queue.Queue() lowercase__ = TreeNode(int(SCREAMING_SNAKE_CASE_ ) ) q.put(SCREAMING_SNAKE_CASE_ ) while not q.empty(): lowercase__ = q.get() lowercase__ = f'''Enter the left node of {node_found.data}: ''' lowercase__ = input(SCREAMING_SNAKE_CASE_ ).strip().lower() or "n" if check == "n": return tree_node lowercase__ = TreeNode(int(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = left_node q.put(SCREAMING_SNAKE_CASE_ ) lowercase__ = f'''Enter the right node of {node_found.data}: ''' lowercase__ = input(SCREAMING_SNAKE_CASE_ ).strip().lower() or "n" if check == "n": return tree_node lowercase__ = TreeNode(int(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = right_node q.put(SCREAMING_SNAKE_CASE_ ) raise def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return lowercase__ = queue.Queue() q.put(SCREAMING_SNAKE_CASE_ ) while not q.empty(): lowercase__ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: 
q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return lowercase__ = queue.Queue() q.put(SCREAMING_SNAKE_CASE_ ) while not q.empty(): lowercase__ = [] while not q.empty(): lowercase__ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return lowercase__ = [] lowercase__ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(SCREAMING_SNAKE_CASE_ ) lowercase__ = n.left # end of while means current node doesn't have left child lowercase__ = stack.pop() # start to traverse its right child lowercase__ = n.right def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return lowercase__ = [] lowercase__ = node while n or stack: while n: stack.append(SCREAMING_SNAKE_CASE_ ) lowercase__ = n.left lowercase__ = stack.pop() print(n.data , end="," ) lowercase__ = n.right def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not node: return lowercase__ , lowercase__ = [], [] lowercase__ = node stacka.append(SCREAMING_SNAKE_CASE_ ) while stacka: # to find the reversed order of post order, store it in stack2 lowercase__ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(SCREAMING_SNAKE_CASE_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "" , SCREAMING_SNAKE_CASE_=50 , SCREAMING_SNAKE_CASE_="*" ): if 
not s: return "\n" + width * char lowercase__ , lowercase__ = divmod(width - len(SCREAMING_SNAKE_CASE_ ) - 2 , 2 ) return f'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) lowercase_ = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
37
# Lazy-import module definition for roberta_prelayernorm: sub-modules are only
# imported when first accessed, gated on which backends are installed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Configuration is always importable; modeling files are added below per backend.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Configuration for GPT-NeoX models.

    Stores the hyperparameters that define a GPT-NeoX architecture; defaults
    match EleutherAI/gpt-neox-20b. Passing `rope_scaling` enables RoPE
    position-embedding scaling and is validated on construction.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Fraction of head dimensions that receive rotary embeddings.
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        # Head dimension must be integral for the attention implementation.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: a dict {"type": "linear"|"dynamic", "factor": float > 1}.

        Raises:
            ValueError: if `rope_scaling` is set but malformed.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
37
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint.

    Args:
        model: the PyTorch BERT model whose state_dict is converted.
        ckpt_dir: directory where the ``<model_name>.ckpt`` files are written
            (created if missing).
        model_name: checkpoint base name; dashes are replaced by underscores.
    """
    # Linear/attention weights are stored transposed between PT and TF.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch substring, tensorflow substring) rename rules, applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        # Map a PyTorch parameter name to the TF variable name under "bert/".
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        # Create (and zero-initialize) a TF variable matching the tensor.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and convert it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
37
1
# Boilerplate injected into generated documentation notebooks (Italian locale).
# NOTE(review): the first constant must be named INSTALL_CONTENT — it is
# referenced below; the other two names follow the doc-builder convention
# (notebook_first_cells / black_avoid_patterns) — TODO confirm against callers.
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cell prepended to every generated notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder tokens the `black` formatter must not reformat; each maps to a
# syntactically valid fake identifier used during formatting.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
37
# Lazy-import module definition for timesformer: modeling classes are only
# importable when torch is installed; configuration is always available.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
def binomial_coefficient(n: int, r: int) -> int:
    """Return C(n, r) using the Pascal's-triangle recurrence.

    Runs in O(n * r) time with O(r) extra space by updating a single row
    in place from right to left.

    Args:
        n: total number of items (n >= 0).
        r: number of items chosen (0 <= r <= n).

    Returns:
        The binomial coefficient "n choose r".
    """
    c = [0 for _ in range(r + 1)]
    c[0] = 1  # nC0 = 1
    for i in range(1, n + 1):
        # Compute the current row from the previous row; iterating right to
        # left lets us update in place without clobbering values we still need.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


if __name__ == "__main__":
    # Guarded so importing this module has no side effects.
    print(binomial_coefficient(n=10, r=5))
37
"""Convert PyTorch checkpoints to TensorFlow 2 (h5) checkpoints, optionally
comparing model outputs, for every architecture listed in MODEL_CLASSES."""

import argparse
import os

from . import (
    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AlbertConfig,
    BartConfig,
    BertConfig,
    CamembertConfig,
    CTRLConfig,
    DistilBertConfig,
    DPRConfig,
    ElectraConfig,
    FlaubertConfig,
    GPT2Config,
    LayoutLMConfig,
    LxmertConfig,
    OpenAIGPTConfig,
    RobertaConfig,
    T5Config,
    TFAlbertForPreTraining,
    TFBartForConditionalGeneration,
    TFBartForSequenceClassification,
    TFBertForPreTraining,
    TFBertForQuestionAnswering,
    TFBertForSequenceClassification,
    TFCamembertForMaskedLM,
    TFCTRLLMHeadModel,
    TFDistilBertForMaskedLM,
    TFDistilBertForQuestionAnswering,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
    TFDPRReader,
    TFElectraForPreTraining,
    TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
    TFLayoutLMForMaskedLM,
    TFLxmertForPreTraining,
    TFLxmertVisualFeatureEncoder,
    TFOpenAIGPTLMHeadModel,
    TFRobertaForCausalLM,
    TFRobertaForMaskedLM,
    TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
    TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
    TFXLMRobertaForMaskedLM,
    TFXLMWithLMHeadModel,
    TFXLNetLMHeadModel,
    TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
    XLMConfig,
    XLMRobertaConfig,
    XLNetConfig,
    is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging


if is_torch_available():
    import numpy as np
    import torch

    from . import (
        AlbertForPreTraining,
        BartForConditionalGeneration,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        CamembertForMaskedLM,
        CTRLLMHeadModel,
        DistilBertForMaskedLM,
        DistilBertForQuestionAnswering,
        DPRContextEncoder,
        DPRQuestionEncoder,
        DPRReader,
        ElectraForPreTraining,
        FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
        LayoutLMForMaskedLM,
        LxmertForPreTraining,
        LxmertVisualFeatureEncoder,
        OpenAIGPTLMHeadModel,
        RobertaForMaskedLM,
        RobertaForSequenceClassification,
        T5ForConditionalGeneration,
        TransfoXLLMHeadModel,
        XLMRobertaForMaskedLM,
        XLMWithLMHeadModel,
        XLNetLMHeadModel,
    )


logging.set_verbosity_info()

# Maps a model-type key to (config class, TF class(es), PT class(es),
# pretrained archive map(s)).
# NOTE(review): entry arity is not uniform ("bart" has 5 elements, "dpr" 10,
# most 4) while the functions below unpack 4 resp. 5 values — TODO confirm
# against the upstream script which entries each code path actually uses.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}


def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint to a TF2 h5 checkpoint.

    Args:
        model_type: key into MODEL_CLASSES.
        pytorch_checkpoint_path: local path or shortcut name of the PT weights.
        config_file: local path or shortcut name of the model config.
        tf_dump_path: output path of the .h5 weights file.
        compare_with_pt_model: also run both models and assert the max
            absolute output difference is <= 2e-2.
        use_cached_models: reuse previously downloaded files when True.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert every (or one) model type's checkpoints to TF2 h5 files.

    When args_model_type is None, all keys of MODEL_CLASSES are processed;
    shortcut-name lists default to the archive maps of each model type.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            # Finetuned checkpoints get a dedicated MODEL_CLASSES entry keyed
            # by their shortcut name; skip them unless explicitly requested.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}:"
                f" {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
37
1
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max normalize *data* into [0, 1].

    Args:
        data: numeric samples; must contain at least two distinct values
            (otherwise the range is zero and a ZeroDivisionError is raised).
        ndigits: decimal places each result is rounded to.

    Returns:
        List of rounded normalized values, same order as *data*.
    """
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Z-score standardize *data* (sample mean 0, sample stdev 1).

    Args:
        data: numeric samples; needs at least two values for ``stdev``.
        ndigits: decimal places each result is rounded to.

    Returns:
        List of rounded standardized values, same order as *data*.
    """
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
37
import math


def res(x: float, y: float) -> float:
    """Return a comparable proxy for x ** y, namely y * log10(x).

    Because log10 is monotonic, comparing these proxies orders the powers
    without computing huge numbers. Special cases: x == 0 returns 0
    (0 raised to any positive power is 0); y == 0 returns 1 (anything to
    the power 0 is 1).

    Raises:
        AssertionError: unreachable guard — every input hits a branch above.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any number is 0
        return 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two (base, power) pairs and report which power is larger,
    # comparing via logarithms rather than evaluating the powers.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))

    resa = res(xa, ya)
    resb = res(xb, yb)

    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
37
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", 
"deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): for i in range(config.num_hidden_layers ): if base_model: lowercase__ = "" else: lowercase__ = "deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) lowercase__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase__ = in_proj_weight[ : config.hidden_size, : ] lowercase__ = in_proj_bias[: config.hidden_size] lowercase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ = in_proj_weight[ -config.hidden_size :, : ] lowercase__ = in_proj_bias[-config.hidden_size :] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = dct.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = DeiTConfig() # all deit models have fine-tuned heads lowercase__ = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowercase__ = 1000 lowercase__ = "huggingface/label-files" lowercase__ = "imagenet-1k-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = int(deit_name[-6:-4] ) lowercase__ = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): lowercase__ = 192 lowercase__ = 768 lowercase__ = 12 lowercase__ = 3 elif deit_name[9:].startswith("small" ): lowercase__ = 384 lowercase__ = 1536 lowercase__ = 12 lowercase__ = 6 if deit_name[9:].startswith("base" ): pass 
elif deit_name[4:].startswith("large" ): lowercase__ = 1024 lowercase__ = 4096 lowercase__ = 24 lowercase__ = 16 # load original model from timm lowercase__ = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase__ = timm_model.state_dict() lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load HuggingFace model lowercase__ = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # Check outputs on an image, prepared by DeiTImageProcessor lowercase__ = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowercase__ = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE_ , crop_size=config.image_size ) lowercase__ = image_processor(images=prepare_img() , return_tensors="pt" ) lowercase__ = encoding["pixel_values"] lowercase__ = model(SCREAMING_SNAKE_CASE_ ) lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the 
DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase_ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
37
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ): lowercase__ = bp_numa lowercase__ = bp_numa lowercase__ = bp_numa lowercase__ = conva_get[:2] lowercase__ = conva_get[2] lowercase__ = size_pa lowercase__ = rate_w lowercase__ = rate_t lowercase__ = [ np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 ) lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 ) lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self : Any, __lowercase : List[str] ): # save model dict with pickle lowercase__ = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowercase, "wb" ) as f: pickle.dump(__lowercase, __lowercase ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls : Dict, __lowercase : Union[str, Any] ): # read saved model with open(__lowercase, "rb" ) as f: lowercase__ = pickle.load(__lowercase ) # noqa: S301 lowercase__ = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) lowercase__ = model_dic.get("size_pooling1" ) lowercase__ = model_dic.get("num_bp1" ) lowercase__ = model_dic.get("num_bp2" ) lowercase__ = model_dic.get("num_bp3" ) lowercase__ = model_dic.get("rate_weight" ) lowercase__ = 
model_dic.get("rate_thre" ) # create model instance lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase ) # modify model parameter lowercase__ = model_dic.get("w_conv1" ) lowercase__ = model_dic.get("wkj" ) lowercase__ = model_dic.get("vji" ) lowercase__ = model_dic.get("thre_conv1" ) lowercase__ = model_dic.get("thre_bp2" ) lowercase__ = model_dic.get("thre_bp3" ) return conv_ins def A__ ( self : str, __lowercase : List[Any] ): return 1 / (1 + np.exp(-1 * x )) def A__ ( self : List[str], __lowercase : Optional[Any] ): return round(__lowercase, 3 ) def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ): # convolution process lowercase__ = convs[0] lowercase__ = convs[1] lowercase__ = np.shape(__lowercase )[0] # get the data slice of original image data, data_focus lowercase__ = [] for i_focus in range(0, size_data - size_conv + 1, __lowercase ): for j_focus in range(0, size_data - size_conv + 1, __lowercase ): lowercase__ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowercase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__ = [] lowercase__ = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__lowercase ): lowercase__ = [] for i_focus in range(len(__lowercase ) ): lowercase__ = ( np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__lowercase ) ) lowercase__ = np.asmatrix(__lowercase ).reshape( __lowercase, __lowercase ) data_featuremap.append(__lowercase ) # expanding the data slice to One dimenssion lowercase__ = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowercase ) ) lowercase__ = np.asarray(__lowercase ) return focus_list, data_featuremap def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], 
__lowercase : Union[str, Any]="average_pool" ): # pooling process lowercase__ = len(featuremaps[0] ) lowercase__ = int(size_map / size_pooling ) lowercase__ = [] for i_map in range(len(__lowercase ) ): lowercase__ = featuremaps[i_map] lowercase__ = [] for i_focus in range(0, __lowercase, __lowercase ): for j_focus in range(0, __lowercase, __lowercase ): lowercase__ = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowercase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowercase ) ) lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase ) featuremap_pooled.append(__lowercase ) return featuremap_pooled def A__ ( self : str, __lowercase : Optional[Any] ): # expanding three dimension data to one dimension list lowercase__ = [] for i in range(len(__lowercase ) ): lowercase__ = np.shape(data[i] ) lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] ) lowercase__ = data_listed.getA().tolist()[0] data_expanded.extend(__lowercase ) lowercase__ = np.asarray(__lowercase ) return data_expanded def A__ ( self : Optional[int], __lowercase : Optional[int] ): # expanding matrix to one dimension list lowercase__ = np.asarray(__lowercase ) lowercase__ = np.shape(__lowercase ) lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] ) return data_expanded def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ): lowercase__ = [] lowercase__ = 0 for i_map in range(__lowercase ): lowercase__ = np.ones((size_map, size_map) ) for i in range(0, __lowercase, __lowercase ): for j in range(0, __lowercase, __lowercase ): lowercase__ = pd_pool[ i_pool ] lowercase__ = i_pool + 1 lowercase__ = np.multiply( __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) ) pd_all.append(__lowercase ) return pd_all def A__ ( self : Tuple, 
__lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ): # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(__lowercase )) ) print((" - - Shape: Teach_Data ", np.shape(__lowercase )) ) lowercase__ = 0 lowercase__ = [] lowercase__ = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase__ = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(__lowercase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__ = np.asmatrix(datas_train[p] ) lowercase__ = np.asarray(datas_teach[p] ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) lowercase__ = np.shape(__lowercase ) lowercase__ = self._expand(__lowercase ) lowercase__ = data_bp_input lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa lowercase__ = self.sig(__lowercase ) lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa lowercase__ = self.sig(__lowercase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__ = np.multiply( (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) ) lowercase__ = np.multiply( np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) ) lowercase__ = np.dot(__lowercase, self.vji ) lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__ = pd_conva_pooled.T.getA().tolist() lowercase__ = self._calculate_gradient_from_pool( __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__ = self._expand_mat(pd_conva_all[k_conv] ) lowercase__ = 
self.rate_weight * np.dot(__lowercase, __lowercase ) lowercase__ = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__ = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__ = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__ = rp + 1 lowercase__ = error_count / patterns all_mse.append(__lowercase ) def draw_error(): lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__lowercase, "+-" ) plt.plot(__lowercase, "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(__lowercase, alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A__ ( self : List[str], __lowercase : Optional[int] ): # model predict lowercase__ = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(__lowercase )) ) for p in range(len(__lowercase ) ): lowercase__ = np.asmatrix(datas_test[p] ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) lowercase__ = self._expand(__lowercase ) lowercase__ = data_bp_input lowercase__ = bp_outa * self.vji.T - self.thre_bpa lowercase__ = self.sig(__lowercase ) lowercase__ = bp_outa * self.wkj.T - self.thre_bpa lowercase__ = self.sig(__lowercase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__ = [list(map(self.do_round, 
__lowercase ) ) for each in produce_out] return np.asarray(__lowercase ) def A__ ( self : int, __lowercase : Any ): # return the data of image after convoluting process so we can check it out lowercase__ = np.asmatrix(__lowercase ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
37
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("""Program to check whether a number is a Perfect number or not...""") lowercase_ = int(input("""Enter number: """).strip()) print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
37
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = "huggingface/label-files" lowercase__ = "imagenet-1k-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase__ = BitConfig( conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , ) return config def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if "stem.conv" in name: lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: lowercase__ = name.replace("blocks" , "layers" ) if "head.fc" in name: lowercase__ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): lowercase__ = "bit." + name if "bit" not in name and "classifier" not in name: lowercase__ = "bit.encoder." 
+ name return name def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = get_config(SCREAMING_SNAKE_CASE_ ) # load original model from timm lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() # load state_dict of original model lowercase__ = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val.squeeze() if "head" in key else val # load HuggingFace model lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # create image processor lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) ) lowercase__ = transform.transforms lowercase__ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } lowercase__ = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase__ = prepare_img() lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # verify logits with torch.no_grad(): lowercase__ = model(SCREAMING_SNAKE_CASE_ ) lowercase__ = 
outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) lowercase_ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
class _snake_case : def __init__( self : Union[str, Any] ): lowercase__ = "" lowercase__ = "" lowercase__ = [] def A__ ( self : Optional[Any], __lowercase : int, __lowercase : int ): if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: lowercase__ = self.__min_dist_top_down_dp(m - 1, n - 1 ) else: lowercase__ = self.__min_dist_top_down_dp(__lowercase, n - 1 ) lowercase__ = self.__min_dist_top_down_dp(m - 1, __lowercase ) lowercase__ = self.__min_dist_top_down_dp(m - 1, n - 1 ) lowercase__ = 1 + min(__lowercase, __lowercase, __lowercase ) return self.dp[m][n] def A__ ( self : Optional[Any], __lowercase : str, __lowercase : str ): lowercase__ = worda lowercase__ = worda lowercase__ = [[-1 for _ in range(len(__lowercase ) )] for _ in range(len(__lowercase ) )] return self.__min_dist_top_down_dp(len(__lowercase ) - 1, len(__lowercase ) - 1 ) def A__ ( self : List[str], __lowercase : str, __lowercase : str ): lowercase__ = worda lowercase__ = worda lowercase__ = len(__lowercase ) lowercase__ = len(__lowercase ) lowercase__ = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty lowercase__ = j elif j == 0: # second string is empty lowercase__ = i elif worda[i - 1] == worda[j - 1]: # last characters are equal lowercase__ = self.dp[i - 1][j - 1] else: lowercase__ = self.dp[i][j - 1] lowercase__ = self.dp[i - 1][j] lowercase__ = self.dp[i - 1][j - 1] lowercase__ = 1 + min(__lowercase, __lowercase, __lowercase ) return self.dp[m][n] if __name__ == "__main__": lowercase_ = EditDistance() print("""****************** Testing Edit Distance DP Algorithm ******************""") print() lowercase_ = input("""Enter the first string: """).strip() lowercase_ = input("""Enter the second string: """).strip() print() print(F'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}') print(F'The minimum edit distance 
is: {solver.min_dist_bottom_up(Sa, Sa)}') print() print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
37
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _snake_case ( lowercase__): def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ): lowercase__ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } lowercase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowercase__ = token_dict["token"] lowercase__ = Tokenizer(Unigram() ) lowercase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ), " " ), normalizers.Lowercase(), ] ) lowercase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ), pre_tokenizers.Digits(individual_digits=__lowercase ), pre_tokenizers.Punctuation(), ] ) lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ) lowercase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], ) lowercase__ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__lowercase, __lowercase ) def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) if isinstance(__lowercase, __lowercase ): lowercase__ = [files] 
self._tokenizer.train(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : str ): lowercase__ = json.loads(self._tokenizer.to_str() ) lowercase__ = self.special_tokens["unk"]["id"] lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
37
1
from __future__ import annotations def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = sorted(numsa + numsa ) lowercase__ , lowercase__ = divmod(len(SCREAMING_SNAKE_CASE_ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() lowercase_ = [float(x) for x in input("""Enter the elements of first array: """).split()] lowercase_ = [float(x) for x in input("""Enter the elements of second array: """).split()] print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
37
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowercase__ = f'''{src_lang}-{tgt_lang}''' lowercase__ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). 
The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(f'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowercase_ = Path(__file__).resolve().parent.parent.parent lowercase_ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""") lowercase_ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
1
# Tests for the TensorFlow DeiT models (base model plus the masked-image-modeling
# and image-classification heads), including a slow integration test.
# NOTE(review): identifiers in this file appear mechanically mangled — classes are
# all named `_snake_case`, methods `A__`, parameters `__lowercase` (duplicate
# argument names are not valid Python), and assignments all target `lowercase__`
# while later code reads the original names (`parent`, `batch_size`, ...).
# Code is left byte-identical; comments document the visible intent only.
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class _snake_case:
    # Model-tester helper: builds tiny random DeiT configs and inputs so each
    # architecture can be exercised quickly on CPU. Defaults document the
    # intended miniature hyperparameters (13-image batch, 30x30 images, 2x2
    # patches, hidden size 32, 2 layers, 4 heads, ...).
    def __init__(
        self: Optional[Any],
        __lowercase: str,
        __lowercase: int = 13,
        __lowercase: Optional[Any] = 30,
        __lowercase: Optional[Any] = 2,
        __lowercase: int = 3,
        __lowercase: int = True,
        __lowercase: int = True,
        __lowercase: Tuple = 32,
        __lowercase: List[Any] = 2,
        __lowercase: List[Any] = 4,
        __lowercase: int = 37,
        __lowercase: Dict = "gelu",
        __lowercase: Union[str, Any] = 0.1,
        __lowercase: Optional[Any] = 0.1,
        __lowercase: Optional[int] = 10,
        __lowercase: int = 0.02,
        __lowercase: Tuple = 3,
        __lowercase: int = None,
        __lowercase: Optional[int] = 2,
    ):
        # NOTE(review): the right-hand-side names below are presumably the
        # original parameter names before mangling — TODO confirm against upstream.
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = patch_size
        lowercase__ = num_channels
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = scope
        lowercase__ = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        lowercase__ = (image_size // patch_size) ** 2
        lowercase__ = num_patches + 2

    def A__(self: int):
        # Builds a random pixel-value tensor (and labels when use_labels is set)
        # together with a fresh config.
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size)

        lowercase__ = self.get_config()

        return config, pixel_values, labels

    def A__(self: Dict):
        # Returns a DeiTConfig mirroring the tester's hyperparameters.
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=__lowercase,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def A__(self: Optional[int], __lowercase: Dict, __lowercase: Optional[Any], __lowercase: Union[str, Any]):
        # Forward pass through the base model; checks the hidden-state shape.
        lowercase__ = TFDeiTModel(config=__lowercase)
        lowercase__ = model(__lowercase)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def A__(self: List[str], __lowercase: Dict, __lowercase: int, __lowercase: Dict):
        # Masked-image-modeling head: reconstruction must match the input image
        # shape; also re-run with 1-channel (greyscale) inputs.
        lowercase__ = TFDeiTForMaskedImageModeling(config=__lowercase)
        lowercase__ = model(__lowercase)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        lowercase__ = 1
        lowercase__ = TFDeiTForMaskedImageModeling(__lowercase)

        lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowercase__ = model(__lowercase)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def A__(self: Tuple, __lowercase: Dict, __lowercase: Union[str, Any], __lowercase: Tuple):
        # Classification head: logits must be (batch, num_labels); greyscale re-run too.
        lowercase__ = self.type_sequence_label_size
        lowercase__ = TFDeiTForImageClassification(__lowercase)
        lowercase__ = model(__lowercase, labels=__lowercase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        lowercase__ = 1
        lowercase__ = TFDeiTForImageClassification(__lowercase)

        lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        lowercase__ = model(__lowercase, labels=__lowercase)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def A__(self: Union[str, Any]):
        # Packs (config, {"pixel_values": ...}) for the common-test mixin.
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__, lowercase__, lowercase__ = config_and_inputs
        lowercase__ = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class _snake_case(lowercase__, lowercase__, unittest.TestCase):
    # Common model/pipeline test suite for the TF DeiT variants.
    # NOTE(review): bases are presumably TFModelTesterMixin and
    # PipelineTesterMixin (both imported above) — mangled to `lowercase__`.
    UpperCamelCase__ : List[Any] = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase__ : Optional[Any] = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    UpperCamelCase__ : List[Any] = False
    UpperCamelCase__ : Optional[Any] = False
    UpperCamelCase__ : Tuple = False
    UpperCamelCase__ : int = False

    def A__(self: Dict):
        # NOTE(review): `TFDeiTModelTester` is the pre-mangling name of the
        # helper class above — undefined as written; TODO confirm.
        lowercase__ = TFDeiTModelTester(self)
        lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase, hidden_size=37)

    def A__(self: Union[str, Any]):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def A__(self: List[str]):
        pass

    def A__(self: int):
        # Input embeddings must be a Keras layer; output embeddings a Dense (or None).
        lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__ = model_class(__lowercase)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            lowercase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__lowercase, tf.keras.layers.Dense))

    def A__(self: List[str]):
        # call() signature of every model must start with `pixel_values`.
        lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__ = model_class(__lowercase)
            lowercase__ = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]

            lowercase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1], __lowercase)

    def A__(self: Union[str, Any]):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowercase)

    def A__(self: List[str]):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__lowercase)

    def A__(self: Union[str, Any]):
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowercase)

    def A__(self: Optional[Any], __lowercase: List[str], __lowercase: str, __lowercase: Optional[int] = False):
        # The teacher-distillation model has no labels argument — drop them.
        lowercase__ = super()._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def A__(self: Optional[int]):
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = TFDeiTModel.from_pretrained(__lowercase)
            self.assertIsNotNone(__lowercase)


def __lowerCAmelCase():
    # Loads the COCO fixture image used by the integration test.
    lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class _snake_case(unittest.TestCase):
    # Slow integration test against the published distilled checkpoint.
    @cached_property
    def A__(self: List[str]):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def A__(self: Dict):
        lowercase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=__lowercase, return_tensors="tf")

        # forward pass
        lowercase__ = model(**__lowercase)

        # verify the logits: 1000 ImageNet classes, with a pinned slice of values.
        lowercase__ = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, __lowercase)

        lowercase__ = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], __lowercase, atol=1e-4))
37
# Tests for the Transformer-XL word-level tokenizer: vocabulary handling,
# lower-casing behaviour, Moses-style punctuation splitting and token moving.
# NOTE(review): identifiers are mechanically mangled (class `_snake_case`,
# methods `A__`, params `__lowercase`, assignment targets `lowercase__`); the
# code is left byte-identical, comments document visible intent only.
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class _snake_case(lowercase__, unittest.TestCase):
    # NOTE(review): base `lowercase__` is presumably TokenizerTesterMixin
    # (imported above) — confirm against upstream.
    UpperCamelCase__ : Dict = TransfoXLTokenizer
    UpperCamelCase__ : List[Any] = False
    UpperCamelCase__ : List[Any] = False

    def A__(self: Union[str, Any]):
        # Writes a tiny vocabulary file into the test tmpdir.
        super().setUp()

        lowercase__ = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def A__(self: Union[str, Any], **__lowercase: Any):
        # Tokenizer factory for the mixin; forces lower_case=True.
        # NOTE(review): `lowercase__ = True` was presumably
        # `kwargs["lower_case"] = True` before mangling — confirm.
        lowercase__ = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase)

    def A__(self: Tuple, __lowercase: Optional[int]):
        # (input, expected output) pair for the mixin's round-trip test.
        lowercase__ = "<unk> UNwanted , running"
        lowercase__ = "<unk> unwanted, running"
        return input_text, output_text

    def A__(self: str):
        # Full tokenization + id conversion against the tiny vocabulary.
        lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase)

        lowercase__ = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase), [0, 4, 8, 7])

    def A__(self: Tuple):
        # lower_case=True: whitespace collapsed, text lower-cased.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def A__(self: Tuple):
        # lower_case=False: original casing preserved.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def A__(self: str):
        # Moses-style splitting: numbers keep @,@ / @.@ markers, hyphens become @-@,
        # and convert_tokens_to_string must round-trip back to the input.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase)

        lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        lowercase__ = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(__lowercase), __lowercase)

        self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase), __lowercase)

    def A__(self: List[str]):
        # move_added_token must relocate (not duplicate) a freshly added token.
        lowercase__ = self.get_tokenizer()
        lowercase__ = len(__lowercase)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__lowercase), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
37
1
# Lazy-import __init__ for the RoBERTa-PreLayerNorm model family: the import
# structure below is only materialized on first attribute access (via
# _LazyModule), with per-backend sections guarded by availability checks.
# NOTE(review): every assignment target has been mangled to `lowercase_`, so
# the dict/lists no longer accumulate into a single `_import_structure` as the
# final _LazyModule call expects — pre-mangling these were
# `_import_structure = {...}` and `_import_structure[...] = [...]`; confirm
# against the upstream file.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


lowercase_ = {
    """configuration_roberta_prelayernorm""": [
        """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """RobertaPreLayerNormConfig""",
        """RobertaPreLayerNormOnnxConfig""",
    ],
}

# PyTorch models — only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaPreLayerNormForCausalLM""",
        """RobertaPreLayerNormForMaskedLM""",
        """RobertaPreLayerNormForMultipleChoice""",
        """RobertaPreLayerNormForQuestionAnswering""",
        """RobertaPreLayerNormForSequenceClassification""",
        """RobertaPreLayerNormForTokenClassification""",
        """RobertaPreLayerNormModel""",
        """RobertaPreLayerNormPreTrainedModel""",
    ]

# TensorFlow models — only exported when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaPreLayerNormForCausalLM""",
        """TFRobertaPreLayerNormForMaskedLM""",
        """TFRobertaPreLayerNormForMultipleChoice""",
        """TFRobertaPreLayerNormForQuestionAnswering""",
        """TFRobertaPreLayerNormForSequenceClassification""",
        """TFRobertaPreLayerNormForTokenClassification""",
        """TFRobertaPreLayerNormMainLayer""",
        """TFRobertaPreLayerNormModel""",
        """TFRobertaPreLayerNormPreTrainedModel""",
    ]

# Flax models — only exported when JAX/Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """FlaxRobertaPreLayerNormForCausalLM""",
        """FlaxRobertaPreLayerNormForMaskedLM""",
        """FlaxRobertaPreLayerNormForMultipleChoice""",
        """FlaxRobertaPreLayerNormForQuestionAnswering""",
        """FlaxRobertaPreLayerNormForSequenceClassification""",
        """FlaxRobertaPreLayerNormForTokenClassification""",
        """FlaxRobertaPreLayerNormModel""",
        """FlaxRobertaPreLayerNormPreTrainedModel""",
    ]


# Under static type checking, perform the real imports so annotations resolve.
if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

# At runtime, replace this module with the lazy proxy.
else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Parse TensorFlowBenchmarkArguments from the command line and run the benchmark.

    Deprecated ``--no_*`` flags make ``parse_args_into_dataclasses`` raise a
    ``ValueError``; that error is caught and re-raised with a hint to use the
    ``--no-*`` spelling instead, while genuinely unknown flags are reported as-is.

    Fixes vs. the previous revision:
    - the parser was constructed with the undefined name ``SCREAMING_SNAKE_CASE_``
      instead of ``TensorFlowBenchmarkArguments`` (NameError);
    - the function was named ``__lowerCAmelCase`` while the ``__main__`` guard
      called ``main()`` (NameError);
    - the benchmark was built from a first, unguarded parse before the
      ``try``/``except`` ran a second parse — now there is a single guarded parse.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        # argparse's message ends with the repr of the offending flag list.
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval of argparse's error text mirrors the upstream
        # script; the input is locally produced, not untrusted, but fragile.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
37
1
def prefix_function(input_string: str) -> list[int]:
    """Compute the Knuth-Morris-Pratt prefix function of ``input_string``.

    ``result[i]`` is the length of the longest proper prefix of
    ``input_string[: i + 1]`` that is also a suffix of it.

    Fix vs. the previous revision: both functions in this file were named
    ``__lowerCAmelCase`` (the second definition shadowed the first) while this
    body was referenced as ``prefix_function`` — restoring the names the code
    already uses makes the module importable and callable.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("")
    []
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the length of the longest proper prefix that also occurs as a suffix.

    Robustness fix: ``max()`` on an empty sequence raises ``ValueError``; an
    empty string now simply yields 0.

    >>> longest_prefix("aabcdaabc")
    4
    >>> longest_prefix("")
    0
    """
    if not input_string:
        return 0
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
# `datasets convert` CLI command: converts a TensorFlow Datasets dataset script
# into a HuggingFace Datasets script via line-by-line regex rewriting.
# NOTE(review): identifiers are mechanically mangled (class `_snake_case`,
# methods `A__`, params `__lowercase`, assignment targets `lowercase__`) and
# bodies read the original names; code is left byte-identical.
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


# Conflict-style markers wrapped around lines that need human review.
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""

# Mentions of these trigger the highlight markers above.
lowercase_ = [
    """TextEncoderConfig""",
    """ByteTextEncoder""",
    """SubwordTextEncoder""",
    """encoder_config""",
    """maybe_build_from_corpus""",
    """manual_dir""",
]

lowercase_ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"""tfds\.core""", r"""datasets"""),
    (r"""tf\.io\.gfile\.GFile""", r"""open"""),
    (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
    (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
    (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
    (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
    (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
    (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
    (r"""tfds\.""", r"""datasets."""),
    (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
    (r"""self\.builder_config""", r"""self.config"""),
]


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_):
    # Command factory used by argparse's set_defaults(func=...).
    # NOTE(review): the parameter is mangled but the body reads `args` — the
    # pre-mangling parameter name; confirm against upstream.
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class _snake_case(lowercase__):
    # NOTE(review): base `lowercase__` is presumably BaseDatasetsCLICommand
    # (imported above).
    @staticmethod
    def A__(__lowercase: ArgumentParser):
        # Registers the `convert` subcommand and its two required paths.
        lowercase__ = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=__lowercase,
            required=__lowercase,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=__lowercase)

    def __init__(self: Tuple, __lowercase: str, __lowercase: str, *__lowercase: Tuple):
        lowercase__ = get_logger("datasets-cli/converting")

        lowercase__ = tfds_path
        lowercase__ = datasets_directory

    def A__(self: Any):
        # Main conversion: walk the source file(s), rewrite each line with the
        # TO_CONVERT regexes, flag suspicious lines, and write builder scripts
        # into per-dataset output directories (utilities are moved afterwards).
        if os.path.isdir(self._tfds_path):
            lowercase__ = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            lowercase__ = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        lowercase__ = os.path.abspath(self._datasets_directory)

        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
        lowercase__ = []
        lowercase__ = []
        lowercase__ = {}

        if os.path.isdir(self._tfds_path):
            lowercase__ = os.listdir(__lowercase)
        else:
            lowercase__ = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''')
            lowercase__ = os.path.join(__lowercase, __lowercase)
            lowercase__ = os.path.join(__lowercase, __lowercase)

            if not os.path.isfile(__lowercase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(__lowercase, encoding="utf-8") as f:
                lowercase__ = f.readlines()

            lowercase__ = []
            lowercase__ = False
            lowercase__ = False
            lowercase__ = []
            for line in lines:
                lowercase__ = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    lowercase__ = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    lowercase__ = ""
                    continue
                elif "from absl import logging" in out_line:
                    lowercase__ = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    lowercase__ = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Line mentions a TFDS feature with no direct equivalent:
                    # wrap it in conflict markers for manual follow-up.
                    lowercase__ = True
                    lowercase__ = list(filter(lambda __lowercase: e in out_line, __lowercase))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase) + "\n")
                    out_lines.append(__lowercase)
                    out_lines.append(__lowercase)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        lowercase__ = re.sub(__lowercase, __lowercase, __lowercase)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    lowercase__ = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''')

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    lowercase__ = True
                out_lines.append(__lowercase)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                lowercase__ = f_name.replace(".py", "")
                lowercase__ = os.path.join(__lowercase, __lowercase)
                lowercase__ = os.path.join(__lowercase, __lowercase)
                os.makedirs(__lowercase, exist_ok=__lowercase)
                self._logger.info(F'''Adding directory {output_dir}''')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowercase)

            if needs_manual_update:
                with_manual_update.append(__lowercase)

            with open(__lowercase, "w", encoding="utf-8") as f:
                f.writelines(__lowercase)
            self._logger.info(F'''Converted in {output_file}''')

        for utils_file in utils_files:
            try:
                lowercase__ = os.path.basename(__lowercase)
                lowercase__ = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''')
                shutil.copy(__lowercase, __lowercase)
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''')

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.'''
                )
37
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _snake_case(ProcessorMixin):
    """CLIP processor: wraps a CLIP image processor and a CLIP tokenizer into one object.

    Fixes vs. the previous revision:
    - every method signature reused the name ``__lowercase`` for all parameters
      (duplicate argument names are a SyntaxError), so there was no working
      interface; the canonical one is restored from the behavior the bodies show;
    - the three class attributes were all assigned to ``UpperCamelCase__`` and
      overwrote each other; ``ProcessorMixin`` requires ``attributes``,
      ``image_processor_class`` and ``tokenizer_class``;
    - in ``__call__`` the combined text+images branch assigned
      ``image_features.pixel_values`` to a throwaway local instead of storing it
      in the returned encoding, silently dropping the image features;
    - the base class was the undefined name ``lowercase__`` although
      ``ProcessorMixin`` is imported above.
    """

    # ProcessorMixin hooks: which sub-objects to (de)serialize and their classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Store the two components; accepts the deprecated `feature_extractor` kwarg."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns a BatchEncoding with the tokenizer outputs, the image
        processor outputs, or both (pixel_values merged into the text
        encoding). Raises ValueError when neither input is given.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # merge the image features into the text encoding so one object
            # carries both modalities
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving and deduplicated."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
37
# Fast (tokenizers-backed) tokenizer for LED, derived from the BART byte-level
# BPE tokenizer, with LED-specific handling of `global_attention_mask` padding.
# NOTE(review): identifiers are mechanically mangled (class `_snake_case`,
# methods `A__`, params `__lowercase`, assignment targets `lowercase__`);
# code is left byte-identical, comments document visible intent only.
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


lowercase_ = logging.get_logger(__name__)

lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}


class _snake_case(lowercase__):
    # NOTE(review): base `lowercase__` is presumably PreTrainedTokenizerFast
    # (imported above); the class attributes below were presumably
    # vocab_files_names / pretrained_vocab_files_map / max_model_input_sizes /
    # slow_tokenizer_class / model_input_names before mangling.
    UpperCamelCase__ : int = VOCAB_FILES_NAMES
    UpperCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] = LEDTokenizer
    UpperCamelCase__ : Tuple = ["""input_ids""", """attention_mask"""]

    def __init__(
        self: Optional[Any],
        __lowercase: Optional[Any] = None,
        __lowercase: Dict = None,
        __lowercase: Tuple = None,
        __lowercase: Union[str, Any] = "replace",
        __lowercase: Tuple = "<s>",
        __lowercase: Optional[Any] = "</s>",
        __lowercase: Tuple = "</s>",
        __lowercase: List[str] = "<s>",
        __lowercase: Tuple = "<unk>",
        __lowercase: Dict = "<pad>",
        __lowercase: Dict = "<mask>",
        __lowercase: Any = False,
        __lowercase: Any = True,
        **__lowercase: List[Any],
    ):
        # Forward everything to the fast-tokenizer base, then make sure the
        # backend pre-tokenizer and post-processor agree with the
        # add_prefix_space / trim_offsets settings requested here.
        super().__init__(
            __lowercase,
            __lowercase,
            tokenizer_file=__lowercase,
            errors=__lowercase,
            bos_token=__lowercase,
            eos_token=__lowercase,
            sep_token=__lowercase,
            cls_token=__lowercase,
            unk_token=__lowercase,
            pad_token=__lowercase,
            mask_token=__lowercase,
            add_prefix_space=__lowercase,
            trim_offsets=__lowercase,
            **__lowercase,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space differs.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", __lowercase) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type"))
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase)

        lowercase__ = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase)
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"])
            if "cls" in state:
                lowercase__ = tuple(state["cls"])

            lowercase__ = False

            if state.get("add_prefix_space", __lowercase) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True

            if state.get("trim_offsets", __lowercase) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True

            if changes_to_apply:
                lowercase__ = getattr(__lowercase, state.pop("type"))
                lowercase__ = component_class(**__lowercase)
                setattr(self.backend_tokenizer, __lowercase, __lowercase)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__(self: str):
        # Returns the mask token as a string, or None (with an error log) when unset.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def A__(self: Optional[int], __lowercase: Dict):
        # The mask token is made lstrip so `<mask>` absorbs the space before it.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase) if isinstance(__lowercase, __lowercase) else value
        lowercase__ = value

    def A__(self: Any, *__lowercase: List[Any], **__lowercase: Optional[Any]):
        # Pretokenized input requires add_prefix_space=True for byte-level BPE.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*__lowercase, **__lowercase)

    def A__(self: int, *__lowercase: Union[str, Any], **__lowercase: List[str]):
        # Same pretokenized-input guard as the batch variant above.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*__lowercase, **__lowercase)

    def A__(self: Optional[Any], __lowercase: str, __lowercase: Optional[str] = None):
        # Saves the backend tokenizer model files; returns the written paths.
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase)
        return tuple(__lowercase)

    def A__(self: List[str], __lowercase: int, __lowercase: Optional[int] = None):
        # <s> A </s> for one sequence, <s> A </s> B </s> for a pair.
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__(self: int, __lowercase: List[int], __lowercase: Optional[List[int]] = None):
        # LED (like RoBERTa/BART) does not use token type ids: all zeros.
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def A__(
        self: Union[str, Any],
        __lowercase: Union[Dict[str, EncodedInput], BatchEncoding],
        __lowercase: Optional[int] = None,
        __lowercase: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        __lowercase: Optional[int] = None,
        __lowercase: Optional[bool] = None,
    ):
        # Pads the standard inputs via the base class, then pads
        # `global_attention_mask` to the same length using -1 as filler.
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase,
            max_length=__lowercase,
            padding_strategy=__lowercase,
            pad_to_multiple_of=__lowercase,
            return_attention_mask=__lowercase,
        )

        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"]) != len(__lowercase)

            if needs_to_be_padded:
                lowercase__ = len(__lowercase) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
37
1
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

lowercase_ = logging.get_logger(__name__)


class _snake_case ( lowercase__):
    # Speech-to-Text style feature extractor: raw waveforms -> Kaldi-compatible
    # log-mel filter-bank features with optional utterance-level CMVN
    # (cepstral mean/variance normalization).
    # NOTE(review): identifiers look mechanically mangled -- every method is
    # named `A__`, assignments target `lowercase__` while reads use the original
    # names (`features`, `raw_speech`, `padded_inputs`, ...), and the base class
    # is the undefined name `lowercase__`. Confirm against the upstream
    # `Speech2TextFeatureExtractor` before relying on this code.

    UpperCamelCase__ : Tuple =["""input_features""", """attention_mask"""]

    def __init__( self : str, __lowercase : Optional[int]=80, __lowercase : Optional[int]=1_6000, __lowercase : Any=80, __lowercase : Optional[Any]=0.0, __lowercase : Optional[Any]=True, __lowercase : Optional[Any]=True, __lowercase : Any=True, **__lowercase : str, ):
        # Store mel-bin count and CMVN flags; the final assignment forces
        # `return_attention_mask` to True (the mask is needed by `normalize`).
        super().__init__(feature_size=__lowercase, sampling_rate=__lowercase, padding_value=__lowercase, **__lowercase )
        lowercase__ = num_mel_bins
        lowercase__ = do_ceptral_normalize
        lowercase__ = normalize_means
        lowercase__ = normalize_vars
        lowercase__ = True

    def A__ ( self : Dict, __lowercase : np.ndarray, ):
        # _extract_fbank_features: scale a float waveform to 16-bit range and
        # run Kaldi-compatible filter-bank extraction via torchaudio.
        lowercase__ = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        lowercase__ = torch.from_numpy(__lowercase ).unsqueeze(0 )
        lowercase__ = ta_kaldi.fbank(__lowercase, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def A__ ( __lowercase : np.ndarray, __lowercase : int, __lowercase : Optional[bool] = True, __lowercase : Optional[bool] = True, __lowercase : float = 0.0, ):
        # utterance_cmvn: mean/variance-normalize the first `input_length` frames
        # of one utterance; frames beyond that length are reset to the padding value.
        # make sure we normalize float32 arrays
        if normalize_means:
            lowercase__ = x[:input_length].mean(axis=0 )
            lowercase__ = np.subtract(__lowercase, __lowercase )
        if normalize_vars:
            lowercase__ = x[:input_length].std(axis=0 )
            lowercase__ = np.divide(__lowercase, __lowercase )
        if input_length < x.shape[0]:
            lowercase__ = padding_value
        # make sure array is in float32
        lowercase__ = x.astype(np.floataa )
        return x

    def A__ ( self : Union[str, Any], __lowercase : List[np.ndarray], __lowercase : Optional[np.ndarray] = None ):
        # normalize: apply utterance_cmvn per example; the per-example valid
        # length comes from the attention mask when available, else the full shape.
        lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(__lowercase, __lowercase, self.normalize_means, self.normalize_vars, self.padding_value )
            for x, n in zip(__lowercase, __lowercase )
        ]

    def __call__( self : Union[str, Any], __lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], __lowercase : Union[bool, str, PaddingStrategy] = False, __lowercase : Optional[int] = None, __lowercase : bool = False, __lowercase : Optional[int] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, **__lowercase : Optional[Any], ):
        # Main featurization entry point: validate sampling rate, normalize the
        # input into a batch of float32 waveforms, extract fbank features, pad,
        # optionally apply CMVN, and convert to the requested tensor type.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        lowercase__ = isinstance(__lowercase, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        lowercase__ = is_batched_numpy or (
            isinstance(__lowercase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowercase__ = [np.asarray(__lowercase, dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(__lowercase, np.ndarray ):
            lowercase__ = np.asarray(__lowercase, dtype=np.floataa )
        elif isinstance(__lowercase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowercase__ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowercase__ = [raw_speech]
        # extract fbank features
        lowercase__ = [self._extract_fbank_features(__lowercase ) for waveform in raw_speech]
        # convert into correct format for padding
        lowercase__ = BatchFeature({"input_features": features} )
        lowercase__ = self.pad(
            __lowercase,
            padding=__lowercase,
            max_length=__lowercase,
            truncation=__lowercase,
            pad_to_multiple_of=__lowercase,
            return_attention_mask=__lowercase,
            **__lowercase,
        )
        # make sure list is in array format
        lowercase__ = padded_inputs.get("input_features" )
        if isinstance(input_features[0], __lowercase ):
            lowercase__ = [np.asarray(__lowercase, dtype=np.floataa ) for feature in input_features]
        lowercase__ = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            lowercase__ = [np.asarray(__lowercase, dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            # The mask is only needed for CMVN when real padding was applied.
            lowercase__ = (
                np.array(__lowercase, dtype=np.intaa )
                if self._get_padding_strategies(__lowercase, max_length=__lowercase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            lowercase__ = self.normalize(
                padded_inputs["input_features"], attention_mask=__lowercase
            )
        if return_tensors is not None:
            lowercase__ = padded_inputs.convert_to_tensors(__lowercase )
        return padded_inputs
37
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def __lowerCAmelCase():
    """Entry point of the `accelerate` CLI.

    Builds the top-level argument parser, registers every sub-command's own
    parser, parses ``sys.argv`` and dispatches to the selected sub-command's
    ``func``. Prints help and exits with status 1 when no sub-command is given.
    """
    # Fix: the original assigned the parser/subparsers/args to the throwaway
    # name `lowercase__` while later statements read undefined names
    # (`SCREAMING_SNAKE_CASE_`), which would raise NameError at runtime.
    # `allow_abbrev=False` restores the upstream value (abbreviated flags
    # must not be accepted, or they would shadow sub-command options).
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    # Fix: the original guard called the undefined name `main()`; the entry
    # point defined in this module is `__lowerCAmelCase`.
    __lowerCAmelCase()
37
1
from math import sqrt


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_=100_0000):
    """Project Euler 86: least cuboid size M whose cumulative count of integer
    shortest-path cuboids first exceeds ``SCREAMING_SNAKE_CASE_`` (the limit).

    For a cuboid a x b x M (a <= b <= M) the shortest surface path is
    sqrt((a+b)^2 + M^2); for each M we count the pairs (a, b) whose path
    length is an integer and accumulate until the total exceeds the limit.

    Returns the smallest M for which the running total exceeds the limit.

    Fixes over the original: the counters were assigned to the throwaway name
    `lowercase__` while the loop read `num_cuboids`/`max_cuboid_size`/`limit`
    (NameError), the inner `min()` used the limit parameter instead of the
    current cuboid size, and the `__main__` guard called the undefined name
    `solution`.
    """
    limit = SCREAMING_SNAKE_CASE_
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        # sum_shortest_sides = a + b ranges over 2 .. 2*M
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count pairs (a, b) with a + b = sum_shortest_sides and
                # 1 <= a <= b <= max_cuboid_size:
                # a runs from max(1, s - M) to s // 2 inclusive.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{__lowerCAmelCase() = }")
37
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


# NOTE(review): identifiers in this module look mechanically mangled -- both
# classes are named `_snake_case`, every test method is named `A__` (so in a
# real class only the last definition would survive), assignments target
# `lowercase__` while reads use the original attribute names, and the second
# class inherits from the undefined name `lowercase__`. Confirm against the
# upstream Donut image-processing tests before relying on this code.
class _snake_case ( unittest.TestCase):
    # Test helper that holds the configuration used to exercise
    # `DonutImageProcessor` in the test-case class below.

    def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ):
        # NOTE(review): the two list defaults are mutable default arguments --
        # shared across calls; harmless here only because they are never mutated.
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = num_channels
        lowercase__ = image_size
        lowercase__ = min_resolution
        lowercase__ = max_resolution
        lowercase__ = do_resize
        lowercase__ = size if size is not None else {"height": 18, "width": 20}
        lowercase__ = do_thumbnail
        lowercase__ = do_align_axis
        lowercase__ = do_pad
        lowercase__ = do_normalize
        lowercase__ = image_mean
        lowercase__ = image_std

    def A__ ( self : Optional[Any] ):
        # prepare_image_processor_dict: kwargs for DonutImageProcessor(...).
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class _snake_case ( lowercase__ , unittest.TestCase):
    # Functional tests of DonutImageProcessor over PIL, numpy, and torch inputs.

    UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None

    def A__ ( self : str ):
        # setUp: build the shared tester fixture.
        lowercase__ = DonutImageProcessingTester(self )

    @property
    def A__ ( self : List[str] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : Optional[Any] ):
        # Verify the processor exposes all of its configuration attributes.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_resize" ) )
        self.assertTrue(hasattr(__lowercase, "size" ) )
        self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) )
        self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) )
        self.assertTrue(hasattr(__lowercase, "do_pad" ) )
        self.assertTrue(hasattr(__lowercase, "do_normalize" ) )
        self.assertTrue(hasattr(__lowercase, "image_mean" ) )
        self.assertTrue(hasattr(__lowercase, "image_std" ) )

    def A__ ( self : str ):
        # from_dict must honor `size` overrides, including legacy (w, h) tuples.
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 20} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
        # Previous config had dimensions in (width, height) order
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) )
        self.assertEqual(image_processor.size, {"height": 84, "width": 42} )

    def A__ ( self : List[str] ):
        pass

    @is_flaky()
    def A__ ( self : Dict ):
        # PIL inputs: single image and batch must come out at the configured size.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def A__ ( self : Optional[Any] ):
        # numpy inputs: same shape checks as above.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def A__ ( self : Tuple ):
        # torch inputs: same shape checks as above.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
37
1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer

lowercase_ = logging.get_logger(__name__)

# Filenames of the serialized vocabulary artifacts.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

# Canonical hub URLs of the vocab/merges/tokenizer files per checkpoint.
lowercase_ = {
    """vocab_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
        """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
        ),
    },
    """merges_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
        """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
        ),
    },
    """tokenizer_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
        """roberta-base-openai-detector""": (
            """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
        ),
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum model input sizes (positional embedding capacity) per checkpoint.
lowercase_ = {
    """roberta-base""": 512,
    """roberta-large""": 512,
    """roberta-large-mnli""": 512,
    """distilroberta-base""": 512,
    """roberta-base-openai-detector""": 512,
    """roberta-large-openai-detector""": 512,
}


class _snake_case ( lowercase__):
    # Fast (tokenizers-backed) RoBERTa tokenizer.
    # NOTE(review): identifiers look mechanically mangled -- assignments target
    # `lowercase__` while reads use the original names (`pre_tok_state`,
    # `state`, `changes_to_apply`, ...), the base class is the undefined
    # `lowercase__`, and several methods share the name `A__` so only the last
    # definition would survive. Confirm against the upstream
    # `RobertaTokenizerFast` before relying on this code.

    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : Any =["""input_ids""", """attention_mask"""]
    UpperCamelCase__ : Optional[int] =RobertaTokenizer

    def __init__( self : List[str], __lowercase : Optional[Any]=None, __lowercase : List[str]=None, __lowercase : Any=None, __lowercase : Optional[int]="replace", __lowercase : List[Any]="<s>", __lowercase : str="</s>", __lowercase : List[Any]="</s>", __lowercase : int="<s>", __lowercase : Dict="<unk>", __lowercase : List[Any]="<pad>", __lowercase : List[str]="<mask>", __lowercase : Tuple=False, __lowercase : str=True, **__lowercase : Tuple, ):
        super().__init__(
            __lowercase,
            __lowercase,
            tokenizer_file=__lowercase,
            errors=__lowercase,
            bos_token=__lowercase,
            eos_token=__lowercase,
            sep_token=__lowercase,
            cls_token=__lowercase,
            unk_token=__lowercase,
            pad_token=__lowercase,
            mask_token=__lowercase,
            add_prefix_space=__lowercase,
            trim_offsets=__lowercase,
            **__lowercase,
        )
        # Rebuild the backend pre-tokenizer if its `add_prefix_space` setting
        # disagrees with the requested one.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase )
        lowercase__ = add_prefix_space

        # Keep the backend post-processor in sync with `add_prefix_space` /
        # `trim_offsets`, rebuilding it only when something actually changed.
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"] )
            if "cls" in state:
                lowercase__ = tuple(state["cls"] )

            lowercase__ = False
            if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True
            if state.get("trim_offsets", __lowercase ) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True
            if changes_to_apply:
                lowercase__ = getattr(__lowercase, state.pop("type" ) )
                lowercase__ = component_class(**__lowercase )
                setattr(self.backend_tokenizer, __lowercase, __lowercase )

    @property
    def A__ ( self : Optional[int] ):
        # mask_token getter: logs an error (and returns None) when unset.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def A__ ( self : List[str], __lowercase : Any ):
        # mask_token setter: wrap plain strings so the mask token eats the
        # preceding space (lstrip), as RoBERTa expects.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
        lowercase__ = value

    def A__ ( self : List[Any], *__lowercase : Tuple, **__lowercase : Any ):
        # _batch_encode_plus: pretokenized input requires add_prefix_space=True.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : List[Any], *__lowercase : Tuple, **__lowercase : int ):
        # _encode_plus: same pretokenized-input constraint as above.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : List[Any], __lowercase : str, __lowercase : Optional[str] = None ):
        # save_vocabulary: write backend model files; returns the written paths.
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )

    def A__ ( self : Any, __lowercase : Tuple, __lowercase : Union[str, Any]=None ):
        # build_inputs_with_special_tokens: `<s> A </s>` / `<s> A </s></s> B </s>`.
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__ ( self : Dict, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: RoBERTa does not use token type
        # ids, so an all-zero vector of the appropriate length is returned.
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
37
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _snake_case ( lowercase__):
    # Repository-hygiene checks over `./datasets/**/*.py`: every `open(...)`
    # must pass an explicit encoding (or a binary/write mode), and no bare
    # `print(...)` calls may remain (loaders must use the datasets logger).
    # NOTE(review): identifiers look mechanically mangled -- the base class is
    # the undefined name `lowercase__` (upstream: `TestCase`), all four methods
    # share the name `A__` so only the last definition would survive, and
    # assignments target `lowercase__` while reads use the original names
    # (`regexp`, `match`, `matches`, `dataset_paths`, `dataset_files`).
    # Confirm against the upstream `datasets` code-quality test module.

    def A__ ( self : Optional[Any], __lowercase : str ):
        # _no_encoding_on_file_open: return a regex match when the file calls
        # open() without an explicit encoding or a binary/write mode keyword.
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            lowercase__ = input_file.read()
            lowercase__ = regexp.search(__lowercase )
        return match

    def A__ ( self : str, __lowercase : str ):
        # _no_print_statements: return the first real print(...) call, skipping
        # prints inside comments, string literals, and docstrings.
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
            lowercase__ = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowercase__ = regexp.finditer(__lowercase )
        lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def A__ ( self : Union[str, Any] ):
        # Fail when any dataset script opens a file without an explicit encoding.
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowercase ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )

    def A__ ( self : Union[str, Any] ):
        # Fail when any dataset script still contains a bare print(...) call.
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowercase ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
37
1
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# Official SQuAD 2.0 evaluation script (exact-match / F1, optional no-answer
# probability thresholding and precision-recall plots).
# NOTE(review): identifiers look mechanically mangled -- every function is
# named `__lowerCAmelCase` (so only the last definition would survive at
# module level), every parameter is `SCREAMING_SNAKE_CASE_`, and assignments
# target `lowercase__` while reads use the original names (`parser`, `preds`,
# `scores`, ...). Per-function comments below describe the upstream intent;
# confirm against the official `evaluate-v2.0.py` before relying on this code.

lowercase_ = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowercase_ = None


def __lowerCAmelCase ( ):
    # parse_args: CLI definition; prints help and exits when run with no args.
    lowercase__ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=SCREAMING_SNAKE_CASE_ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=SCREAMING_SNAKE_CASE_ , help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    # make_qid_to_has_ans: question id -> whether it has at least one answer.
    lowercase__ = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                lowercase__ = bool(qa["answers"]["text"] )
    return qid_to_has_ans


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    # normalize_answer: lowercase, strip punctuation/articles, squash whitespace.
    def remove_articles(SCREAMING_SNAKE_CASE_ ):
        return ARTICLES_REGEX.sub(" " , SCREAMING_SNAKE_CASE_ )

    def white_space_fix(SCREAMING_SNAKE_CASE_ ):
        return " ".join(text.split() )

    def remove_punc(SCREAMING_SNAKE_CASE_ ):
        lowercase__ = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(SCREAMING_SNAKE_CASE_ ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE_ ) ) ) )


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    # get_tokens: normalized whitespace tokens; empty list for falsy input.
    if not s:
        return []
    return normalize_answer(SCREAMING_SNAKE_CASE_ ).split()


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # compute_exact: 1 when normalized gold and prediction match, else 0.
    return int(normalize_answer(SCREAMING_SNAKE_CASE_ ) == normalize_answer(SCREAMING_SNAKE_CASE_ ) )


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # compute_f1: token-level F1 between gold and prediction.
    lowercase__ = get_tokens(SCREAMING_SNAKE_CASE_ )
    lowercase__ = get_tokens(SCREAMING_SNAKE_CASE_ )
    lowercase__ = collections.Counter(SCREAMING_SNAKE_CASE_ ) & collections.Counter(SCREAMING_SNAKE_CASE_ )
    lowercase__ = sum(common.values() )
    if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    lowercase__ = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
    lowercase__ = 1.0 * num_same / len(SCREAMING_SNAKE_CASE_ )
    lowercase__ = (2 * precision * recall) / (precision + recall)
    return fa


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # get_raw_scores: per-question exact and F1 scores (max over gold answers).
    lowercase__ = {}
    lowercase__ = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                lowercase__ = qa["id"]
                lowercase__ = [t for t in qa["answers"]["text"] if normalize_answer(SCREAMING_SNAKE_CASE_ )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    lowercase__ = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                lowercase__ = preds[qid]
                # Take max over all gold answers
                lowercase__ = max(compute_exact(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for a in gold_answers )
                lowercase__ = max(compute_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for a in gold_answers )
    return exact_scores, fa_scores


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # apply_no_ans_threshold: predict "" when no-answer prob exceeds the threshold.
    lowercase__ = {}
    for qid, s in scores.items():
        lowercase__ = na_probs[qid] > na_prob_thresh
        if pred_na:
            lowercase__ = float(not qid_to_has_ans[qid] )
        else:
            lowercase__ = s
    return new_scores


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
    # make_eval_dict: aggregate exact/F1 over all questions or a qid subset.
    if not qid_list:
        lowercase__ = len(SCREAMING_SNAKE_CASE_ )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ]
        )
    else:
        lowercase__ = len(SCREAMING_SNAKE_CASE_ )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ]
        )


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # merge_eval: copy sub-eval metrics into the main dict under a prefix.
    for k in new_eval:
        lowercase__ = new_eval[k]


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # plot_pr_curve: save one precision-recall step plot (needs matplotlib,
    # imported lazily in the __main__ guard below).
    plt.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(SCREAMING_SNAKE_CASE_ )
    plt.savefig(SCREAMING_SNAKE_CASE_ )
    plt.clf()


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
    # make_precision_recall_eval: sweep the no-answer-prob threshold and compute
    # average precision; optionally save the PR curve image.
    lowercase__ = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : na_probs[k] )
    lowercase__ = 0.0
    lowercase__ = 1.0
    lowercase__ = 0.0
    lowercase__ = [1.0]
    lowercase__ = [0.0]
    lowercase__ = 0.0
    for i, qid in enumerate(SCREAMING_SNAKE_CASE_ ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        lowercase__ = true_pos / float(i + 1 )
        lowercase__ = true_pos / float(SCREAMING_SNAKE_CASE_ )
        if i == len(SCREAMING_SNAKE_CASE_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(SCREAMING_SNAKE_CASE_ )
            recalls.append(SCREAMING_SNAKE_CASE_ )
    if out_image:
        plot_pr_curve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    return {"ap": 100.0 * avg_prec}


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # run_precision_recall_analysis: PR curves for exact, F1, and the oracle
    # binary HasAns/NoAns task; merges all three into the main eval dict.
    if out_image_dir and not os.path.exists(SCREAMING_SNAKE_CASE_ ):
        os.makedirs(SCREAMING_SNAKE_CASE_ )
    lowercase__ = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    lowercase__ = make_precision_recall_eval(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_image=os.path.join(SCREAMING_SNAKE_CASE_ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    lowercase__ = make_precision_recall_eval(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_image=os.path.join(SCREAMING_SNAKE_CASE_ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    lowercase__ = {k: float(SCREAMING_SNAKE_CASE_ ) for k, v in qid_to_has_ans.items()}
    lowercase__ = make_precision_recall_eval(
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , out_image=os.path.join(SCREAMING_SNAKE_CASE_ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "pr_exact" )
    merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "pr_f1" )
    merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "pr_oracle" )


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # histogram_na_prob: histogram of predicted no-answer probabilities.
    if not qid_list:
        return
    lowercase__ = [na_probs[k] for k in qid_list]
    lowercase__ = np.ones_like(SCREAMING_SNAKE_CASE_ ) / float(len(SCREAMING_SNAKE_CASE_ ) )
    plt.hist(SCREAMING_SNAKE_CASE_ , weights=SCREAMING_SNAKE_CASE_ , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(SCREAMING_SNAKE_CASE_ , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # find_best_thresh: the no-answer threshold maximizing the given score.
    lowercase__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    lowercase__ = num_no_ans
    lowercase__ = cur_score
    lowercase__ = 0.0
    lowercase__ = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : na_probs[k] )
    for i, qid in enumerate(SCREAMING_SNAKE_CASE_ ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            lowercase__ = scores[qid]
        else:
            if preds[qid]:
                lowercase__ = -1
            else:
                lowercase__ = 0
        cur_score += diff
        if cur_score > best_score:
            lowercase__ = cur_score
            lowercase__ = na_probs[qid]
    return 100.0 * best_score / len(SCREAMING_SNAKE_CASE_ ), best_thresh


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # find_all_best_thresh: best thresholds for exact and F1, stored in main_eval.
    lowercase__ , lowercase__ = find_best_thresh(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowercase__ , lowercase__ = find_best_thresh(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowercase__ = best_exact
    lowercase__ = exact_thresh
    lowercase__ = best_fa
    lowercase__ = fa_thresh


def __lowerCAmelCase ( ):
    # main: load data/predictions/na-probs, score, threshold, report/plot.
    with open(OPTS.data_file ) as f:
        lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
        lowercase__ = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
    else:
        lowercase__ = {k: 0.0 for k in preds}
    lowercase__ = make_qid_to_has_ans(SCREAMING_SNAKE_CASE_ )  # maps qid to True/False
    lowercase__ = [k for k, v in qid_to_has_ans.items() if v]
    lowercase__ = [k for k, v in qid_to_has_ans.items() if not v]
    lowercase__ , lowercase__ = get_raw_scores(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowercase__ = apply_no_ans_threshold(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.na_prob_thresh )
    lowercase__ = apply_no_ans_threshold(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.na_prob_thresh )
    lowercase__ = make_eval_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    if has_ans_qids:
        lowercase__ = make_eval_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , qid_list=SCREAMING_SNAKE_CASE_ )
        merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "HasAns" )
    if no_ans_qids:
        lowercase__ = make_eval_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , qid_list=SCREAMING_SNAKE_CASE_ )
        merge_eval(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.out_image_dir )
        histogram_na_prob(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    else:
        print(json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) )


if __name__ == "__main__":
    lowercase_ = parse_args()
    if OPTS.out_image_dir:
        # matplotlib is imported lazily so headless runs without plots work.
        import matplotlib

        matplotlib.use("""Agg""")
        import matplotlib.pyplot as plt
    main()
37
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public names, consumed lazily by `_LazyModule` below.
# Fix: the original bound this dict (and the torch-only list) to the throwaway
# name `lowercase_`, while `_import_structure` was read at the bottom of the
# file -- a NameError at import time.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-backed model classes are simply omitted when torch is missing.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy module must replace THIS module in `sys.modules`; the
    # original discarded it into `lowercase_`, leaving the package empty.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
37
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and dummy inputs for the tests below.

    NOTE(review): the mangled original assigned every constructor argument to a
    throwaway local, so the attributes read later (`batch_size`, `patch_size`,
    `max_patches`, ...) were never set; restored to instance attributes.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets exercised by every test_call_* method below.
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        # Regression value for the reference image above.
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Each flattened patch = patch pixels * channels, plus 2 coordinate slots.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # In VQA mode a header text must be supplied.
        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Without header_text the call must fail.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        # RGBA inputs are converted down to 3 channels by the processor.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Alpha channel is dropped, hence num_channels - 1.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
37
1
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 BPE tokenizer backed by keras-nlp's BytePairTokenizer.

    NOTE(review): the mangled original bound the constructor arguments to a
    throwaway local instead of `self.*`, so `call`/`get_config` raised
    AttributeError; restored to instance attributes.
    """

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Build from an existing (slow) GPT2Tokenizer instance."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Load the slow tokenizer from the Hub/disk and wrap it."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Keras deserialization hook (inverse of get_config)."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
37
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count self-avoiding 4-directional paths from (row, col) to the
    bottom-right cell of `grid`, where a cell value of 1 is an obstacle.

    `visit` holds the (row, col) cells already on the current path.
    The mangled original declared four parameters with the same name (a
    SyntaxError); names restored from the body's reads and the recursive
    calls to `depth_first_search`.

    >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
    2
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked cell.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target corner: exactly one path ends here.
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow/fast CLIP tokenizers.

    NOTE(review): the mangled original lost all `self.*` assignments in
    setUp and local names throughout; restored from the names read later
    (`self.vocab_file`, `tokenizer_s`, `text_of_1_token`, ...).
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
37
def all_characters_unique(input_str: str) -> bool:
    """Return True if no character occurs more than once in `input_str`.

    Uses an arbitrary-precision integer as a bitmap indexed by code point.
    The mangled original assigned `bitmap`/`ch_unicode`/`ch_bit_index_on`
    to a throwaway local, raising NameError at runtime; names restored
    from the read sites.

    >>> all_characters_unique("abc")
    True
    >>> all_characters_unique("abca")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


# Backward-compatible alias for the previous (mangled) public name.
__lowerCAmelCase = all_characters_unique

if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
import inspect
import unittest

from huggingface_hub import hf_hub_download

from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ASTForAudioClassification, ASTModel
    from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )

if is_torchaudio_available():
    import torchaudio

    from transformers import ASTFeatureExtractor


class ASTModelTester:
    """Builds small AST configs/inputs for the test suite below.

    NOTE(review): the mangled original dropped all `self.*` assignments in
    __init__, so every attribute read later failed; restored from the reads.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    """Download a reference flac sample and load it with torchaudio."""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
37
def exchange_sort(numbers: list) -> list:
    """Sort `numbers` in place (ascending) by pairwise exchanges and return it.

    The mangled original iterated `range()` over the list itself (TypeError)
    and lost the swap targets; names restored from the body's reads and the
    `exchange_sort(unsorted)` call in the __main__ block.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> exchange_sort([])
    []
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            # Pull the smallest remaining value into position i.
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
37
1
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


# Disable TF32 so the regression slices below are reproducible.
# NOTE(review): the mangled original assigned False to a throwaway name;
# restored to the tf32 switch this pattern sets in sibling test files.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
37
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Raises ValueError for a negative limit.  Names restored from the body's
    reads (`upper_limit`, `catalan_list`) and the `catalan_numbers(N)` call
    in the __main__ block.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    >>> catalan_numbers(0)
    [1]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
37
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class _snake_case(BaseImageProcessor):
    """Image processor that resizes (with an optional crop fraction), center-crops,
    rescales and normalizes images into ``pixel_values`` batches."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image``; when ``crop_pct`` is given, scale the target size up by
        1/crop_pct first (so a later center crop lands on the requested size)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop ``image`` to ``size`` ({"height", "width"})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Fixed precedence bug: the original `do_resize and size is None or resample is None`
        # raised whenever resample was None even with do_resize=False.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
37
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: maps each submodule to the public names it provides.
# (Restored: the original assigned every backend list to one throwaway name and
# then passed an undefined `_import_structure` to _LazyModule.)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return 10 - x * x def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # Bolzano theory in order to find if there is a root between a and b if equation(SCREAMING_SNAKE_CASE_ ) * equation(SCREAMING_SNAKE_CASE_ ) >= 0: raise ValueError("Wrong space!" ) lowercase__ = a while (b - a) >= 0.01: # Find middle point lowercase__ = (a + b) / 2 # Check if middle point is root if equation(SCREAMING_SNAKE_CASE_ ) == 0.0: break # Decide the side to repeat the steps if equation(SCREAMING_SNAKE_CASE_ ) * equation(SCREAMING_SNAKE_CASE_ ) < 0: lowercase__ = c else: lowercase__ = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
37
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch ``BertModel`` to a TF1-style checkpoint under ``ckpt_dir``.

    Args:
        model: the loaded PyTorch BERT model.
        ckpt_dir: output directory for the TensorFlow checkpoint (created if missing).
        model_name: checkpoint file stem; dashes are replaced with underscores.
    """
    # Weight matrices stored transposed in PyTorch vs TF.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # Ordered PyTorch -> TF name substitutions.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Apply the naming substitutions in order, then prefix the scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable of matching dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
37
1
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _snake_case(DiffusionPipeline):
    """Unconditional image generation with the Karras et al. (2022) stochastic sampler."""

    # Components registered in __init__.
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Generate ``batch_size`` images; returns ImagePipelineOutput (or a tuple
        when ``return_dict`` is False)."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        # Map from [-1, 1] back to [0, 1] and to channels-last numpy.
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
37
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TimesformerModel""", """TimesformerForVideoClassification""", """TimesformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
1
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def __lowerCAmelCase ( ): lowercase__ , lowercase__ = 9, 14 # noqa: F841 lowercase__ = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] lowercase__ = defaultdict(SCREAMING_SNAKE_CASE_ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) lowercase__ = mst(SCREAMING_SNAKE_CASE_ ) lowercase__ = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: lowercase__ = tuple(answer[:2] ) lowercase__ = tuple(edge[::-1] ) assert edge in result or reverse in result
37
import argparse
import os

from . import (
    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AlbertConfig,
    BartConfig,
    BertConfig,
    CamembertConfig,
    CTRLConfig,
    DistilBertConfig,
    DPRConfig,
    ElectraConfig,
    FlaubertConfig,
    GPTaConfig,
    LayoutLMConfig,
    LxmertConfig,
    OpenAIGPTConfig,
    RobertaConfig,
    TaConfig,
    TFAlbertForPreTraining,
    TFBartForConditionalGeneration,
    TFBartForSequenceClassification,
    TFBertForPreTraining,
    TFBertForQuestionAnswering,
    TFBertForSequenceClassification,
    TFCamembertForMaskedLM,
    TFCTRLLMHeadModel,
    TFDistilBertForMaskedLM,
    TFDistilBertForQuestionAnswering,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
    TFDPRReader,
    TFElectraForPreTraining,
    TFFlaubertWithLMHeadModel,
    TFGPTaLMHeadModel,
    TFLayoutLMForMaskedLM,
    TFLxmertForPreTraining,
    TFLxmertVisualFeatureEncoder,
    TFOpenAIGPTLMHeadModel,
    TFRobertaForCausalLM,
    TFRobertaForMaskedLM,
    TFRobertaForSequenceClassification,
    TFTaForConditionalGeneration,
    TFTransfoXLLMHeadModel,
    TFWavaVecaModel,
    TFXLMRobertaForMaskedLM,
    TFXLMWithLMHeadModel,
    TFXLNetLMHeadModel,
    TransfoXLConfig,
    WavaVecaConfig,
    WavaVecaModel,
    XLMConfig,
    XLMRobertaConfig,
    XLNetConfig,
    is_torch_available,
    load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging


if is_torch_available():
    import numpy as np
    import torch

    from . import (
        AlbertForPreTraining,
        BartForConditionalGeneration,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        CamembertForMaskedLM,
        CTRLLMHeadModel,
        DistilBertForMaskedLM,
        DistilBertForQuestionAnswering,
        DPRContextEncoder,
        DPRQuestionEncoder,
        DPRReader,
        ElectraForPreTraining,
        FlaubertWithLMHeadModel,
        GPTaLMHeadModel,
        LayoutLMForMaskedLM,
        LxmertForPreTraining,
        LxmertVisualFeatureEncoder,
        OpenAIGPTLMHeadModel,
        RobertaForMaskedLM,
        RobertaForSequenceClassification,
        TaForConditionalGeneration,
        TransfoXLLMHeadModel,
        XLMRobertaForMaskedLM,
        XLMWithLMHeadModel,
        XLNetLMHeadModel,
    )


logging.set_verbosity_info()

# Registry of convertible model types.
# (Restored name: the original assigned this dict to a throwaway name while all
# code below references `MODEL_CLASSES`.)
# NOTE(review): entries here hold 4, 5 or 10 elements while the conversion
# functions below unpack 4 and 5 respectively — the obfuscation appears to have
# flattened nested tuples; confirm each entry's schema against upstream.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}


def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert one PyTorch checkpoint of ``model_type`` to a TF 2.0 ``.h5`` file.

    Optionally loads the PyTorch model as well and asserts the two forward
    passes agree to within 2e-2.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    # NOTE(review): entries such as "bart" and "dpr" carry more than four
    # elements and will raise ValueError here — confirm the intended arity.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    # NOTE(review): the two boolean assignments here were destroyed by the
    # obfuscation; upstream sets output_hidden_states/output_attentions — confirm.
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one model type (or all known types) to TF 2.0 ``.h5`` files."""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # NOTE(review): five-way unpack here vs four-way in
        # convert_pt_checkpoint_to_tf — the dict entries cannot satisfy both;
        # confirm the intended MODEL_CLASSES schema against upstream.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument(
        "--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models."
    )
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
37
1
from __future__ import annotations def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = 0 lowercase__ = len(SCREAMING_SNAKE_CASE_ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: lowercase__ = i + 1 else: lowercase__ = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
37
import math def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(SCREAMING_SNAKE_CASE_ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("This should never happen" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowercase_ = """Enter the base and the power separated by a comma: """ lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. lowercase_ = res(xa, ya) lowercase_ = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
37
1
# NOTE(review): identifiers in this block appear machine-mangled — both classes
# are named `_snake_case` (the second shadows the first), every test method is
# named `A__` (later defs shadow earlier ones within a class), `__init__`
# repeats the parameter name `__lowercase` (a SyntaxError as written), and
# attribute assignments were collapsed to a local `lowercase__`.  The structure
# mirrors the HuggingFace LayoutLMv3 image-processor test suite — confirm
# against the upstream `test_image_processing_layoutlmv3.py` before relying on
# any behaviour documented here.  Code below is byte-identical to the source;
# only comments were added.
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class _snake_case ( unittest.TestCase):
    # Fixture/tester class: holds configuration used to build image processors
    # and synthetic image batches for the tests below.
    def __init__( self : int, __lowercase : List[Any], __lowercase : Union[str, Any]=7, __lowercase : str=3, __lowercase : Optional[Any]=18, __lowercase : Tuple=30, __lowercase : Any=400, __lowercase : Any=True, __lowercase : str=None, __lowercase : Optional[int]=True, ):
        # Presumably these were `self.parent`, `self.batch_size`, ... — the
        # mangling collapsed every target to the same local name.
        lowercase__ = size if size is not None else {"height": 18, "width": 18}
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = num_channels
        lowercase__ = image_size
        lowercase__ = min_resolution
        lowercase__ = max_resolution
        lowercase__ = do_resize
        lowercase__ = size
        lowercase__ = apply_ocr

    def A__ ( self : Optional[int] ):
        # Kwargs dict used to instantiate the image processor under test.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class _snake_case ( lowercase__ , unittest.TestCase):
    # NOTE(review): base `lowercase__` is undefined at module level here —
    # presumably it was `ImageProcessingSavingTestMixin` (imported above).
    UpperCamelCase__ : List[str] =LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def A__ ( self : Optional[int] ):
        # setUp: build the fixture (name `LayoutLMvaImageProcessingTester`
        # presumably refers to the first class above — TODO confirm).
        lowercase__ = LayoutLMvaImageProcessingTester(self )

    @property
    def A__ ( self : Tuple ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def A__ ( self : int ):
        # Processor exposes the expected configuration attributes.
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase, "do_resize" ) )
        self.assertTrue(hasattr(__lowercase, "size" ) )
        self.assertTrue(hasattr(__lowercase, "apply_ocr" ) )

    def A__ ( self : Optional[int] ):
        # `from_dict` honours both the serialized size and a size override.
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 18} )
        lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )

    def A__ ( self : Any ):
        pass

    def A__ ( self : List[Any] ):
        # PIL input: single image and batch produce correctly shaped tensors,
        # and OCR output (words/boxes) is returned.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, Image.Image )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, __lowercase )
        self.assertIsInstance(encoding.boxes, __lowercase )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def A__ ( self : Any ):
        # numpy input: same shape checks as the PIL variant.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, np.ndarray )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def A__ ( self : List[Any] ):
        # torch input: same shape checks as the PIL/numpy variants.
        # Initialize image_processing
        lowercase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase, torch.Tensor )
        # Test not batched input
        lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def A__ ( self : List[str] ):
        # Integration test: run the processor on a real DocVQA page and compare
        # OCR output against fixtures captured with Tesseract 4.1.1.
        # with apply_OCR = True
        lowercase__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowercase__ = load_dataset("hf-internal-testing/fixtures_docvqa", split="test" )
        lowercase__ = Image.open(ds[0]["file"] ).convert("RGB" )
        lowercase__ = image_processing(__lowercase, return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowercase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        lowercase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, __lowercase )
        self.assertListEqual(encoding.boxes, __lowercase )
        # with apply_OCR = False
        lowercase__ = LayoutLMvaImageProcessor(apply_ocr=__lowercase )
        lowercase__ = image_processing(__lowercase, return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
37
# NOTE(review): identifiers in this block appear machine-mangled — the class is
# named `_snake_case`, every method is named `A__` (so later defs shadow
# earlier ones and only the last would survive on the class), `__init__`
# repeats the parameter name `__lowercase` (a SyntaxError as written), and
# attribute assignments were collapsed to a local `lowercase__`.  Structurally
# this is a from-scratch CNN (one conv layer + pooling + a two-layer BP
# network) with pickle save/load, training and prediction.  Code below is
# byte-identical to the source; only comments were added.
import pickle

import numpy as np
from matplotlib import pyplot as plt


class _snake_case :

    def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
        # Presumably stores layer sizes, conv kernel config, pooling size and
        # learning rates, then randomly initialises kernels/weights/thresholds
        # in [-0.5, 0.5] (weights) and [-1, 1] (thresholds) — TODO confirm
        # against the upstream TheAlgorithms CNN implementation.
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = conva_get[:2]
        lowercase__ = conva_get[2]
        lowercase__ = size_pa
        lowercase__ = rate_w
        lowercase__ = rate_t
        lowercase__ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1

    def A__ ( self : Any, __lowercase : List[str] ):
        # Serialise all hyper-parameters and learned parameters to disk.
        # save model dict with pickle
        lowercase__ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(__lowercase, "wb" ) as f:
            pickle.dump(__lowercase, __lowercase )
        print(F'''Model saved: {save_path}''' )

    @classmethod
    def A__ ( cls : Dict, __lowercase : Union[str, Any] ):
        # Alternate constructor: rebuild an instance from a pickled model dict.
        # NOTE(review): unpickling is unsafe on untrusted files (see S301).
        # read saved model
        with open(__lowercase, "rb" ) as f:
            lowercase__ = pickle.load(__lowercase )  # noqa: S301
        lowercase__ = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ = model_dic.get("size_pooling1" )
        lowercase__ = model_dic.get("num_bp1" )
        lowercase__ = model_dic.get("num_bp2" )
        lowercase__ = model_dic.get("num_bp3" )
        lowercase__ = model_dic.get("rate_weight" )
        lowercase__ = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
        # modify model parameter
        lowercase__ = model_dic.get("w_conv1" )
        lowercase__ = model_dic.get("wkj" )
        lowercase__ = model_dic.get("vji" )
        lowercase__ = model_dic.get("thre_conv1" )
        lowercase__ = model_dic.get("thre_bp2" )
        lowercase__ = model_dic.get("thre_bp3" )
        return conv_ins

    def A__ ( self : str, __lowercase : List[Any] ):
        # Sigmoid activation.
        return 1 / (1 + np.exp(-1 * x ))

    def A__ ( self : List[str], __lowercase : Optional[Any] ):
        # Round to 3 decimal places (used when formatting predictions).
        return round(__lowercase, 3 )

    def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
        # Convolution: slide each kernel over the image with the given stride,
        # subtract the kernel threshold, apply sigmoid; returns the flattened
        # input slices and the per-kernel feature maps.
        # convolution process
        lowercase__ = convs[0]
        lowercase__ = convs[1]
        lowercase__ = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowercase__ = []
        for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
            for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
                lowercase__ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ = []
        lowercase__ = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowercase__ = []
            for i_focus in range(len(__lowercase ) ):
                lowercase__ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(
                __lowercase, __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowercase__ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowercase__ = np.asarray(__lowercase )
        return focus_list, data_featuremap

    def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
        # Pooling: average- or max-pool each feature map down by `size_pooling`.
        # pooling process
        lowercase__ = len(featuremaps[0] )
        lowercase__ = int(size_map / size_pooling )
        lowercase__ = []
        for i_map in range(len(__lowercase ) ):
            lowercase__ = featuremaps[i_map]
            lowercase__ = []
            for i_focus in range(0, __lowercase, __lowercase ):
                for j_focus in range(0, __lowercase, __lowercase ):
                    lowercase__ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled

    def A__ ( self : str, __lowercase : Optional[Any] ):
        # Flatten a list of 2-D matrices into one 1-D array.
        # expanding three dimension data to one dimension list
        lowercase__ = []
        for i in range(len(__lowercase ) ):
            lowercase__ = np.shape(data[i] )
            lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
            lowercase__ = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowercase__ = np.asarray(__lowercase )
        return data_expanded

    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # Flatten a single matrix into a 1-row matrix.
        # expanding matrix to one dimension list
        lowercase__ = np.asarray(__lowercase )
        lowercase__ = np.shape(__lowercase )
        lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
        return data_expanded

    def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
        # Backprop helper: up-sample pooled gradients to feature-map size and
        # multiply by the sigmoid derivative of the conv outputs.
        lowercase__ = []
        lowercase__ = 0
        for i_map in range(__lowercase ):
            lowercase__ = np.ones((size_map, size_map) )
            for i in range(0, __lowercase, __lowercase ):
                for j in range(0, __lowercase, __lowercase ):
                    lowercase__ = pd_pool[
                        i_pool
                    ]
                    lowercase__ = i_pool + 1
            lowercase__ = np.multiply(
                __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all

    def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
        # Training loop: forward (conv -> pool -> flatten -> two BP layers),
        # then gradient descent on kernels, weights and thresholds until MSE
        # drops below the target accuracy or the repeat budget is exhausted.
        # NOTE(review): the `=bool` default looks like a mangled `draw_e=bool`
        # flag controlling the error plot — TODO confirm.
        # model traning
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data ", np.shape(__lowercase )) )
        print((" - - Shape: Teach_Data ", np.shape(__lowercase )) )
        lowercase__ = 0
        lowercase__ = []
        lowercase__ = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ = np.asmatrix(datas_train[p] )
                lowercase__ = np.asarray(datas_teach[p] )
                lowercase__ , lowercase__ = self.convolute(
                    __lowercase,
                    self.conva,
                    self.w_conva,
                    self.thre_conva,
                    conv_step=self.step_conva,
                )
                lowercase__ = self.pooling(__lowercase, self.size_poolinga )
                lowercase__ = np.shape(__lowercase )
                lowercase__ = self._expand(__lowercase )
                lowercase__ = data_bp_input
                lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ = np.multiply(
                    (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.multiply(
                    np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.dot(__lowercase, self.vji )
                lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ = pd_conva_pooled.T.getA().tolist()
                lowercase__ = self._calculate_gradient_from_pool(
                    __lowercase,
                    __lowercase,
                    shape_featuremapa[0],
                    shape_featuremapa[1],
                    self.size_poolinga,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
                    lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            lowercase__ = rp + 1
            lowercase__ = error_count / patterns
            all_mse.append(__lowercase )

        def draw_error():
            # Plot per-epoch MSE against the target accuracy line.
            lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase, "+-" )
            plt.plot(__lowercase, "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(__lowercase, alpha=0.5 )
            plt.show()

        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    def A__ ( self : List[str], __lowercase : Optional[int] ):
        # Inference: run the forward pass on each test sample and round the
        # network outputs to 3 decimals.
        # model predict
        lowercase__ = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data ", np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowercase__ = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ = self.convolute(
                __lowercase,
                self.conva,
                self.w_conva,
                self.thre_conva,
                conv_step=self.step_conva,
            )
            lowercase__ = self.pooling(__lowercase, self.size_poolinga )
            lowercase__ = self._expand(__lowercase )
            lowercase__ = data_bp_input
            lowercase__ = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )

    def A__ ( self : int, __lowercase : Any ):
        # Debug helper: return the conv and pooled representations of one image.
        # return the data of image after convoluting process so we can check it out
        lowercase__ = np.asmatrix(__lowercase )
        lowercase__ , lowercase__ = self.convolute(
            __lowercase,
            self.conva,
            self.w_conva,
            self.thre_conva,
            conv_step=self.step_conva,
        )
        lowercase__ = self.pooling(__lowercase, self.size_poolinga )
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
37
1
# NOTE(review): identifiers in this block appear machine-mangled (classes named
# `_snake_case`, base class `lowercase__` undefined here — presumably
# `PretrainedConfig` and `OnnxConfig` respectively — and `__init__` repeats
# the parameter name `__lowercase`, a SyntaxError as written).  Structurally
# this mirrors the HuggingFace XLM `configuration_xlm.py`.  Code below is
# byte-identical to the source; only comments were added.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

# Map of canonical XLM checkpoint names to their hosted config.json URLs.
lowercase_ = {
    """xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
    """xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
    """xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
    """xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
    """xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
    """xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
    """xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
    """xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
    """xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}


class _snake_case ( lowercase__):
    # XLM model configuration: model type plus an attribute map translating
    # the generic HF config names to XLM's historical field names.
    UpperCamelCase__ : Optional[int] ="""xlm"""
    UpperCamelCase__ : List[Any] ={
        """hidden_size""": """emb_dim""",
        """num_attention_heads""": """n_heads""",
        """num_hidden_layers""": """n_layers""",
        """n_words""": """vocab_size""",  # For backward compatibility
    }

    def __init__( self : Any, __lowercase : Any=3_0145, __lowercase : str=2048, __lowercase : str=12, __lowercase : List[str]=16, __lowercase : Union[str, Any]=0.1, __lowercase : Any=0.1, __lowercase : List[Any]=True, __lowercase : Any=False, __lowercase : Union[str, Any]=False, __lowercase : str=False, __lowercase : Tuple=1, __lowercase : int=True, __lowercase : Union[str, Any]=512, __lowercase : int=2048**-0.5, __lowercase : Union[str, Any]=1e-1_2, __lowercase : List[str]=0.02, __lowercase : Tuple=0, __lowercase : List[Any]=1, __lowercase : Dict=2, __lowercase : Optional[Any]=3, __lowercase : Union[str, Any]=5, __lowercase : Union[str, Any]=True, __lowercase : int="first", __lowercase : List[Any]=True, __lowercase : str=None, __lowercase : List[Any]=True, __lowercase : List[str]=0.1, __lowercase : Any=5, __lowercase : List[str]=5, __lowercase : int=0, __lowercase : int=0, __lowercase : Dict=2, __lowercase : List[str]=0, **__lowercase : Any, ):
        # Presumably each assignment below was `self.<name> = <name>`
        # (vocab/dim/layer counts, dropout rates, language-embedding flags,
        # special-token indices, summary-head options) — mangled to a single
        # local target.
        lowercase__ = vocab_size
        lowercase__ = emb_dim
        lowercase__ = n_layers
        lowercase__ = n_heads
        lowercase__ = dropout
        lowercase__ = attention_dropout
        lowercase__ = gelu_activation
        lowercase__ = sinusoidal_embeddings
        lowercase__ = causal
        lowercase__ = asm
        lowercase__ = n_langs
        lowercase__ = use_lang_emb
        lowercase__ = layer_norm_eps
        lowercase__ = bos_index
        lowercase__ = eos_index
        lowercase__ = pad_index
        lowercase__ = unk_index
        lowercase__ = mask_index
        lowercase__ = is_encoder
        lowercase__ = max_position_embeddings
        lowercase__ = embed_init_std
        lowercase__ = init_std
        lowercase__ = summary_type
        lowercase__ = summary_use_proj
        lowercase__ = summary_activation
        lowercase__ = summary_proj_to_labels
        lowercase__ = summary_first_dropout
        lowercase__ = start_n_top
        lowercase__ = end_n_top
        lowercase__ = mask_token_id
        lowercase__ = lang_id
        if "n_words" in kwargs:
            # legacy kwarg kept for backward compatibility with old configs
            lowercase__ = kwargs["n_words"]
        super().__init__(pad_token_id=__lowercase, bos_token_id=__lowercase, **__lowercase )


class _snake_case ( lowercase__):
    # ONNX export configuration: declares the dynamic axes of the model inputs
    # (an extra "choice" axis is needed for multiple-choice heads).
    @property
    def A__ ( self : List[Any] ):
        if self.task == "multiple-choice":
            lowercase__ = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowercase__ = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
37
# NOTE(review): identifiers in this block appear machine-mangled — all four
# helper functions share the name `__lowerCAmelCase`, so each def shadows the
# previous one and the internal calls (`get_config`, `rename_key`,
# `prepare_img`, `convert_bit_checkpoint`) reference names that no longer
# exist; locals are collapsed to `lowercase__`.  Structurally this mirrors
# HuggingFace's `convert_bit_to_pytorch.py` (timm BiT -> HF BiT conversion).
# Code below is byte-identical to the source; only comments were added.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    # Build a BitConfig for an ImageNet-1k classifier from the timm model name,
    # pulling the id2label mapping from the hub.
    lowercase__ = "huggingface/label-files"
    lowercase__ = "imagenet-1k-id2label.json"
    lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
    lowercase__ = {v: k for k, v in idalabel.items()}
    # NOTE(review): `model_name` is undefined in this mangled scope —
    # presumably the (renamed) parameter.
    lowercase__ = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    lowercase__ = BitConfig(
        conv_layer=SCREAMING_SNAKE_CASE_ ,
        num_labels=1000 ,
        idalabel=SCREAMING_SNAKE_CASE_ ,
        labelaid=SCREAMING_SNAKE_CASE_ ,
    )
    return config


def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    # Translate a timm state-dict key into the HF BiT naming scheme.
    if "stem.conv" in name:
        lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        lowercase__ = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        lowercase__ = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        lowercase__ = "bit." + name
    if "bit" not in name and "classifier" not in name:
        lowercase__ = "bit.encoder." + name
    return name


def __lowerCAmelCase ( ):
    # Fetch the standard COCO cats test image used for verification.
    lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
    return im


@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
    # Conversion entry point: load the timm checkpoint, remap its state dict
    # into a HF BitForImageClassification, rebuild the matching image
    # processor from the timm transform, and verify logits agree before
    # optionally saving / pushing to the hub.
    lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
    # load original model from timm
    lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
    timm_model.eval()
    # load state_dict of original model
    lowercase__ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        lowercase__ = val.squeeze() if "head" in key else val
    # load HuggingFace model
    lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
    model.eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # create image processor
    lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
    lowercase__ = transform.transforms
    lowercase__ = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    lowercase__ = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE_ ,
        size={"shortest_edge": timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=SCREAMING_SNAKE_CASE_ ,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
        do_normalize=SCREAMING_SNAKE_CASE_ ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() ,
    )
    lowercase__ = prepare_img()
    lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
    lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # verify logits
    with torch.no_grad():
        lowercase__ = model(SCREAMING_SNAKE_CASE_ )
        lowercase__ = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
    lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""resnetv2_50x1_bitm""",
        type=str,
        help="""Name of the BiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model to the hub.""",
    )
    lowercase_ = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _snake_case ( unittest.TestCase): def A__ ( self : List[str], __lowercase : str, __lowercase : Tuple, __lowercase : Optional[int] ): self.assertEqual(len(__lowercase ), len(__lowercase ) ) for a, b in zip(__lowercase, __lowercase ): self.assertAlmostEqual(__lowercase, __lowercase, delta=__lowercase ) def A__ ( self : Any ): lowercase__ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__lowercase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step, 3 ) self.assertEqual(len(accumulator.gradients ), 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2 ) def A__ ( self : Dict ): lowercase__ = None ops.enable_eager_execution_internal() lowercase__ = tf.config.list_physical_devices("CPU" ) if len(__lowercase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowercase__ = tf.config.list_logical_devices(device_type="CPU" ) lowercase__ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowercase__ = GradientAccumulator() lowercase__ = tf.Variable([4.0, 3.0] ) lowercase__ , lowercase__ = create_optimizer(5e-5, 10, 5 ) lowercase__ = tf.Variable([0.0, 0.0], trainable=__lowercase ) def accumulate_on_replica(__lowercase : Dict ): accumulator([gradient] ) def 
apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) ) @tf.function def accumulate(__lowercase : List[Any], __lowercase : Tuple ): with strategy.scope(): lowercase__ = strategy.experimental_local_results(__lowercase ) local_variables[0].assign(__lowercase ) local_variables[1].assign(__lowercase ) strategy.run(__lowercase, args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__lowercase ) def _check_local_values(__lowercase : int, __lowercase : int ): lowercase__ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value(), __lowercase, tol=1e-2 ) self.assertListAlmostEqual(values[1].value(), __lowercase, tol=1e-2 ) accumulate([1.0, 2.0], [-1.0, 1.0] ) accumulate([3.0, -1.0], [-1.0, -1.0] ) accumulate([-2.0, 2.0], [3.0, -2.0] ) self.assertEqual(accumulator.step, 3 ) _check_local_values([2.0, 3.0], [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) _check_local_values([0.0, 0.0], [0.0, 0.0] )
37
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _snake_case ( lowercase__): def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ): lowercase__ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } lowercase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowercase__ = token_dict["token"] lowercase__ = Tokenizer(Unigram() ) lowercase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ), " " ), normalizers.Lowercase(), ] ) lowercase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ), pre_tokenizers.Digits(individual_digits=__lowercase ), pre_tokenizers.Punctuation(), ] ) lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ) lowercase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], ) lowercase__ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__lowercase, __lowercase ) def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) if isinstance(__lowercase, __lowercase ): lowercase__ = [files] 
self._tokenizer.train(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : str ): lowercase__ = json.loads(self._tokenizer.to_str() ) lowercase__ = self.special_tokens["unk"]["id"] lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
37
1
# Type aliases for readability; both are (x, y, z) triples.
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Return the displacement vector from ``end_point1`` to ``end_point2``.

    The mangled source reused a single parameter name for both points (a
    SyntaxError in Python) and therefore subtracted each point from itself;
    two distinct endpoints is the only reading under which the collinearity
    check below is meaningful.
    """
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Return the cross product ``ab x ac`` of two 3-D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i component
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j component
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k component
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Return True if every component of ``vector`` rounds to 0 at ``accuracy`` decimals."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Return True if the three points lie on one straight line.

    AB x AC is the zero vector exactly when AB and AC are parallel, i.e.
    when the three points are collinear.  ``accuracy`` is the number of
    decimal places used when comparing the cross product against zero
    (default 10, as in the original).

    NOTE(review): the top-level function name was lost in mangling; it is
    never called within this file, so ``are_collinear`` is a best-effort
    restoration — confirm against upstream callers if any exist.
    """
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
37
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowercase__ = f'''{src_lang}-{tgt_lang}''' lowercase__ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). 
The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(f'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowercase_ = Path(__file__).resolve().parent.parent.parent lowercase_ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""") lowercase_ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
1
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).

    The sequence starts at n = 0, so the output is [0, 1, 6, 15, 28, ...].

    :param length: number of terms to generate; must be a positive integer.
    :raises ValueError: if ``length`` is not a positive ``int``.

    Fixes vs. the mangled source: the function is invoked below as
    ``hexagonal_numbers`` but was defined under a generated name, and the
    parameter was renamed away from ``length`` which the body reads (both
    NameErrors).
    """
    # Check the type BEFORE the <= 0 comparison: the original compared first,
    # so a non-numeric argument raised TypeError instead of the documented
    # ValueError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
37
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Dict =TransfoXLTokenizer UpperCamelCase__ : List[Any] =False UpperCamelCase__ : List[Any] =False def A__ ( self : Union[str, Any] ): super().setUp() lowercase__ = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def A__ ( self : Union[str, Any], **__lowercase : Any ): lowercase__ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase ) def A__ ( self : Tuple, __lowercase : Optional[int] ): lowercase__ = "<unk> UNwanted , running" lowercase__ = "<unk> unwanted, running" return input_text, output_text def A__ ( self : str ): lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase ) lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" ) self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] ) def A__ ( self : Tuple ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["hello", "!", "how", "are", "you", "?"] ) def A__ ( self : Tuple ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def A__ ( self : str ): lowercase__ = TransfoXLTokenizer(lower_case=__lowercase ) lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" 
lowercase__ = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase ) self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase ) def A__ ( self : List[str] ): lowercase__ = self.get_tokenizer() lowercase__ = len(__lowercase ) tokenizer.add_tokens(["new1", "new2"] ) tokenizer.move_added_token("new1", 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(__lowercase ), original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1" ), [1] ) self.assertEqual(tokenizer.decode([1] ), "new1" )
37
1
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr

    # NOTE(review): the mangled source imported the non-existent
    # ``fa_score``; ``f1_score`` is the only sklearn.metrics name the
    # "f1"/"acc_and_f1" result keys below can refer to.
    from sklearn.metrics import f1_score, matthews_corrcoef


# Shown with every metric call; the mangled source bound this string to a
# throwaway name and warned with placeholders instead.
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    """Element-wise accuracy: mean of (preds == labels).

    Assumes array-like inputs supporting vectorized ``==`` and ``.mean()``
    (e.g. numpy arrays) — TODO confirm against callers.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    """Return accuracy, F1, and their average as a dict."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations and their average as a dict."""
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    """Compute the standard metric(s) for a GLUE task.

    :param task_name: lowercase GLUE task identifier (e.g. "cola", "mrpc").
    :param preds: model predictions, same length as ``labels``.
    :param labels: gold labels.
    :raises KeyError: for an unknown ``task_name``.

    Fixes vs. the mangled source: the helper functions above were all defined
    under one colliding generated name while being *called* here by their real
    names (guaranteed NameError); their canonical names are restored.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(
        labels
    ), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    """Compute accuracy for the XNLI task.

    :raises ValueError: if ``preds`` and ``labels`` differ in length.
    :raises KeyError: for an unknown ``task_name``.
    """
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Parse TensorFlow benchmark arguments from the CLI and run the benchmark.

    Fixes vs. the mangled source: the function is called as ``main()`` at the
    bottom of the file but was defined under a generated name (NameError), and
    the dataclass handed to ``HfArgumentParser`` had been replaced by a
    placeholder — ``TensorFlowBenchmarkArguments`` is the only class this
    script parses.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        # Parse a second time purely to surface deprecated "--no_xxx" flags
        # as a more helpful error message.
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() of argparse's error text is fragile and unsafe
        # in general; tolerated here only because the input is a locally
        # generated error message, not external data.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'; arg[5:] removes '--no_'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
37
1
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


# True when the optional s3fs dependency is installed.  The mangled source
# bound this to a throwaway name while still testing `_has_safs` below — a
# guaranteed NameError; binding the name the code actually reads fixes it.
_has_safs = importlib.util.find_spec("s3fs") is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

# Filesystem classes registered with fsspec below.  The registration loop
# reads COMPRESSION_FILESYSTEMS, so the list must be bound to that name
# (the mangled source used a throwaway name here too).
# NOTE(review): the attribute names come from the sibling `compression`
# module, which is not visible here — confirm they exist as spelled.
COMPRESSION_FILESYSTEMS: List = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems, warning when an existing registration for the
# same protocol is being overwritten.
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip a protocol prefix ("s3://bucket/x" -> "bucket/x") from a path.

    Paths without "://" are returned unchanged.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True when ``fs`` is a non-local fsspec filesystem instance."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    """Move ``src`` to ``dst`` on filesystem ``fs``.

    LocalFileSystem.mv does copy + rm, so for local filesystems it is more
    efficient to simply shutil.move the directory.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        # NOTE(review): the recursive flag's value was lost in mangling;
        # recursive=True matches upstream `datasets` — confirm.
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Reset fsspec's async machinery (e.g. after a fork) to avoid deadlocks.

    NOTE(review): the three assignment targets in the fallback branch were
    lost in mangling; `iothread[0]`, `loop[0]` and `lock` match upstream
    `datasets` — confirm against the original file.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowercase_ = """<<<<<<< This should probably be modified because it mentions: """ lowercase_ = """======= >>>>>>> """ lowercase_ = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] lowercase_ = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class _snake_case ( lowercase__): @staticmethod def A__ ( __lowercase : ArgumentParser ): lowercase__ = parser.add_parser( "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", ) train_parser.add_argument( "--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", ) train_parser.add_argument( "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." 
) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ): lowercase__ = get_logger("datasets-cli/converting" ) lowercase__ = tfds_path lowercase__ = datasets_directory def A__ ( self : Any ): if os.path.isdir(self._tfds_path ): lowercase__ = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__ = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." ) lowercase__ = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) lowercase__ = [] lowercase__ = [] lowercase__ = {} if os.path.isdir(self._tfds_path ): lowercase__ = os.listdir(__lowercase ) else: lowercase__ = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(__lowercase, encoding="utf-8" ) as f: lowercase__ = f.readlines() lowercase__ = [] lowercase__ = False lowercase__ = False lowercase__ = [] for line in lines: lowercase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__ = "import datasets\n" elif "import tensorflow" in out_line: # order is important here lowercase__ = "" continue elif "from absl import logging" in out_line: lowercase__ = "from datasets import logging\n" elif "getLogger" in out_line: lowercase__ = out_line.replace("getLogger", "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__ 
= True lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" ) out_lines.append(__lowercase ) out_lines.append(__lowercase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__ = re.sub(__lowercase, __lowercase, __lowercase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) lowercase__ = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__ = True out_lines.append(__lowercase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__ = f_name.replace(".py", "" ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) os.makedirs(__lowercase, exist_ok=__lowercase ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__lowercase ) if needs_manual_update: with_manual_update.append(__lowercase ) with open(__lowercase, "w", encoding="utf-8" ) as f: f.writelines(__lowercase ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: lowercase__ = os.path.basename(__lowercase ) lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(__lowercase, __lowercase ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. 
Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : @staticmethod def A__ ( *__lowercase : List[str], **__lowercase : int ): pass @is_pipeline_test @require_vision class _snake_case ( unittest.TestCase): @require_torch def A__ ( self : List[str] ): lowercase__ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": 
ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], ], ) @require_tf def A__ ( self : Any ): lowercase__ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(__lowercase ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], [ {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, {"score": 0.333, "label": ANY(__lowercase )}, ], ], ) @slow @require_torch def A__ ( self : int ): lowercase__ = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowercase ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) lowercase__ = image_classifier([image] * 5, 
candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def A__ ( self : List[Any] ): lowercase__ = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowercase__ = image_classifier(__lowercase, candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(__lowercase ), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) lowercase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, )
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } lowercase_ = { """allenai/led-base-16384""": 1_6384, } class _snake_case ( lowercase__): UpperCamelCase__ : int =VOCAB_FILES_NAMES UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : List[Any] =LEDTokenizer UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any], __lowercase : Optional[Any]=None, __lowercase : Dict=None, __lowercase : Tuple=None, __lowercase : Union[str, Any]="replace", __lowercase : Tuple="<s>", __lowercase : Optional[Any]="</s>", __lowercase : Tuple="</s>", __lowercase : List[str]="<s>", __lowercase : Tuple="<unk>", __lowercase : Dict="<pad>", __lowercase : Dict="<mask>", __lowercase : Any=False, __lowercase : Any=True, **__lowercase : List[Any], ): super().__init__( __lowercase, __lowercase, tokenizer_file=__lowercase, errors=__lowercase, bos_token=__lowercase, eos_token=__lowercase, sep_token=__lowercase, cls_token=__lowercase, unk_token=__lowercase, pad_token=__lowercase, mask_token=__lowercase, 
add_prefix_space=__lowercase, trim_offsets=__lowercase, **__lowercase, ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) ) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**__lowercase ) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = "post_processor" lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase ) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state["sep"] ) if "cls" in state: lowercase__ = tuple(state["cls"] ) lowercase__ = False if state.get("add_prefix_space", __lowercase ) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get("trim_offsets", __lowercase ) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(__lowercase, state.pop("type" ) ) lowercase__ = component_class(**__lowercase ) setattr(self.backend_tokenizer, __lowercase, __lowercase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def A__ ( self : str ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." 
) return None return str(self._mask_token ) @mask_token.setter def A__ ( self : Optional[int], __lowercase : Dict ): lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value lowercase__ = value def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__lowercase, **__lowercase ) def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ): lowercase__ = kwargs.get("is_split_into_words", __lowercase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*__lowercase, **__lowercase ) def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ): lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase ) return tuple(__lowercase ) def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ): lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self : Union[str, Any], __lowercase : Union[Dict[str, EncodedInput], BatchEncoding], __lowercase : Optional[int] = None, __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, __lowercase : Optional[int] = None, __lowercase : Optional[bool] = None, ): lowercase__ = super()._pad( encoded_inputs=__lowercase, max_length=__lowercase, padding_strategy=__lowercase, pad_to_multiple_of=__lowercase, return_attention_mask=__lowercase, ) # Load from model defaults if return_attention_mask is None: lowercase__ = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowercase__ = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase ) if needs_to_be_padded: lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowercase__ = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowercase__ = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _snake_case ( nn.Module): def __init__( self : Union[str, Any], __lowercase : nn.Module, __lowercase : int ): super().__init__() lowercase__ = module lowercase__ = nn.Sequential( nn.Linear(module.in_features, __lowercase, bias=__lowercase ), nn.Linear(__lowercase, module.out_features, bias=__lowercase ), ) lowercase__ = (2.0 / (5 * min(module.in_features, module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=__lowercase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def A__ ( self : List[str], __lowercase : Any, *__lowercase : Any, **__lowercase : Dict ): return self.module(__lowercase, *__lowercase, **__lowercase ) + self.adapter(__lowercase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _snake_case ( unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module UpperCamelCase__ : int ="""bigscience/bloom-1b7""" # Constant values UpperCamelCase__ : List[str] =2.109_659_552_692_574 UpperCamelCase__ : str ="""Hello my name is""" UpperCamelCase__ : List[Any] =set() 
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""") EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""") EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""") UpperCamelCase__ : Optional[Any] =1_0 def A__ ( self : Optional[int] ): # Models and tokenizer lowercase__ = AutoTokenizer.from_pretrained(self.model_name ) class _snake_case ( lowercase__): def A__ ( self : Union[str, Any] ): super().setUp() # Models and tokenizer lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.floataa, device_map="auto" ) lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" ) def A__ ( self : Union[str, Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def A__ ( self : int ): lowercase__ = self.model_abit.config self.assertTrue(hasattr(__lowercase, "quantization_config" ) ) lowercase__ = config.to_dict() lowercase__ = config.to_diff_dict() lowercase__ = config.to_json_string() def A__ ( self : List[str] ): from bitsandbytes.nn import Paramsabit lowercase__ = self.model_fpaa.get_memory_footprint() lowercase__ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE ) lowercase__ = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def A__ ( self : Optional[Any] ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__lowercase, torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def A__ ( self : Optional[int] ): lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) 
lowercase__ = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=__lowercase ), self.EXPECTED_OUTPUTS ) def A__ ( self : str ): lowercase__ = BitsAndBytesConfig() lowercase__ = True lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=__lowercase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) lowercase__ = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=__lowercase ), self.EXPECTED_OUTPUTS ) def A__ ( self : int ): with self.assertRaises(__lowercase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__lowercase ) def A__ ( self : str ): lowercase__ = BitsAndBytesConfig() with self.assertRaises(__lowercase ): lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=__lowercase, load_in_abit=__lowercase, device_map="auto", bnb_abit_quant_type="nf4", ) def A__ ( self : Tuple ): with self.assertRaises(__lowercase ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(__lowercase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__lowercase ): # Tries with a `device` self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(__lowercase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__lowercase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) lowercase__ = self.model_fpaa.to(torch.floataa ) lowercase__ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) # Check this does not throw an error lowercase__ = self.model_fpaa.to("cpu" ) # Check this does not throw an error 
lowercase__ = self.model_fpaa.half() # Check this does not throw an error lowercase__ = self.model_fpaa.float() def A__ ( self : Any ): lowercase__ = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=__lowercase, device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _snake_case ( unittest.TestCase): @classmethod def A__ ( cls : Union[str, Any] ): lowercase__ = "t5-small" lowercase__ = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense lowercase__ = AutoTokenizer.from_pretrained(cls.model_name ) lowercase__ = "Translate in German: Hello, my dog is cute" def A__ ( self : List[str] ): gc.collect() torch.cuda.empty_cache() def A__ ( self : Dict ): from transformers import TaForConditionalGeneration lowercase__ = TaForConditionalGeneration._keep_in_fpaa_modules lowercase__ = None # test with `t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**__lowercase ) # test with `flan-t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_abit=__lowercase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**__lowercase ) lowercase__ = modules def A__ ( self : int ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit ) ) lowercase__ = self.tokenizer(self.input_text, 
return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**__lowercase ) # test with `flan-t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_abit=__lowercase, device_map="auto" ) lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ).to(0 ) lowercase__ = model.generate(**__lowercase ) class _snake_case ( lowercase__): def A__ ( self : Tuple ): super().setUp() # model_name lowercase__ = "bigscience/bloom-560m" lowercase__ = "t5-small" # Different types of model lowercase__ = AutoModel.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" ) # Sequence classification model lowercase__ = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_abit=__lowercase, device_map="auto" ) # CausalLM model lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=__lowercase, device_map="auto" ) # Seq2seq model lowercase__ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name, load_in_abit=__lowercase, device_map="auto" ) def A__ ( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def A__ ( self : Optional[int] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _snake_case ( lowercase__): def A__ ( self : Tuple ): super().setUp() def A__ ( self : List[Any] ): del self.pipe gc.collect() torch.cuda.empty_cache() def A__ ( self : Union[str, Any] ): lowercase__ = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, 
"torch_dtype": torch.floataa}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass lowercase__ = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _snake_case ( lowercase__): def A__ ( self : Any ): super().setUp() def A__ ( self : str ): lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_abit=__lowercase, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ), {0, 1} ) # Check that inference pass works on the model lowercase__ = self.tokenizer(self.input_text, return_tensors="pt" ) # Second real batch lowercase__ = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=__lowercase ), self.EXPECTED_OUTPUTS ) class _snake_case ( lowercase__): def A__ ( self : Optional[Any] ): lowercase__ = "facebook/opt-350m" super().setUp() def A__ ( self : Dict ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=__lowercase ) self.assertEqual(set(model.hf_device_map.values() ), {torch.cuda.current_device()} ) for param in model.parameters(): lowercase__ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability lowercase__ = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__lowercase ) ): lowercase__ = LoRALayer(module.q_proj, rank=16 ) lowercase__ = LoRALayer(module.k_proj, rank=16 ) lowercase__ = LoRALayer(module.v_proj, rank=16 ) # Step 3: dummy batch lowercase__ = self.tokenizer("Test batch ", return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowercase__ = model.forward(**__lowercase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__lowercase, __lowercase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__lowercase, nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _snake_case ( lowercase__): UpperCamelCase__ : Union[str, Any] ="""gpt2-xl""" UpperCamelCase__ : Any =3.3_191_854_854_152_187
37
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __lowerCAmelCase ( ): lowercase__ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE_ ) lowercase__ = parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=SCREAMING_SNAKE_CASE_ ) env_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) launch_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) tpu_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) test_command_parser(subparsers=SCREAMING_SNAKE_CASE_ ) # Let's go lowercase__ = parser.parse_args() if not hasattr(SCREAMING_SNAKE_CASE_ , "func" ): parser.print_help() exit(1 ) # Run args.func(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
37
1
from __future__ import annotations from collections import namedtuple def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
37
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ): lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {"height": 18, "width": 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def A__ ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None def A__ ( self : str ): lowercase__ = DonutImageProcessingTester(self ) @property def A__ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_resize" ) ) self.assertTrue(hasattr(__lowercase, "size" ) ) self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) ) self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) ) self.assertTrue(hasattr(__lowercase, "do_pad" ) ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "image_mean" ) ) self.assertTrue(hasattr(__lowercase, "image_std" ) ) def A__ ( self : str ): lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"height": 18, "width": 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) ) self.assertEqual(image_processor.size, {"height": 84, "width": 42} ) def A__ ( self : List[str] ): pass @is_flaky() def A__ ( self : Dict ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Optional[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Tuple ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
37
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), 
"""google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """google/realm-cc-news-pretrained-embedder""": 512, """google/realm-cc-news-pretrained-encoder""": 512, """google/realm-cc-news-pretrained-scorer""": 512, """google/realm-cc-news-pretrained-openqa""": 512, """google/realm-orqa-nq-openqa""": 512, """google/realm-orqa-nq-reader""": 512, """google/realm-orqa-wq-openqa""": 512, """google/realm-orqa-wq-reader""": 512, } lowercase_ = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class _snake_case ( lowercase__): UpperCamelCase__ : Optional[Any] =VOCAB_FILES_NAMES UpperCamelCase__ : List[Any] =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : List[str] =PRETRAINED_INIT_CONFIGURATION UpperCamelCase__ : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : Any =RealmTokenizer def __init__( self : List[Any], __lowercase : Union[str, Any]=None, 
__lowercase : Dict=None, __lowercase : List[Any]=True, __lowercase : List[str]="[UNK]", __lowercase : int="[SEP]", __lowercase : int="[PAD]", __lowercase : List[str]="[CLS]", __lowercase : Dict="[MASK]", __lowercase : List[Any]=True, __lowercase : Union[str, Any]=None, **__lowercase : Tuple, ): super().__init__( __lowercase, tokenizer_file=__lowercase, do_lower_case=__lowercase, unk_token=__lowercase, sep_token=__lowercase, pad_token=__lowercase, cls_token=__lowercase, mask_token=__lowercase, tokenize_chinese_chars=__lowercase, strip_accents=__lowercase, **__lowercase, ) lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase", __lowercase ) != do_lower_case or normalizer_state.get("strip_accents", __lowercase ) != strip_accents or normalizer_state.get("handle_chinese_chars", __lowercase ) != tokenize_chinese_chars ): lowercase__ = getattr(__lowercase, normalizer_state.pop("type" ) ) lowercase__ = do_lower_case lowercase__ = strip_accents lowercase__ = tokenize_chinese_chars lowercase__ = normalizer_class(**__lowercase ) lowercase__ = do_lower_case def A__ ( self : Union[str, Any], __lowercase : Dict, **__lowercase : Union[str, Any] ): lowercase__ = PaddingStrategy.MAX_LENGTH lowercase__ = text lowercase__ = kwargs.pop("text_pair", __lowercase ) lowercase__ = kwargs.pop("return_tensors", __lowercase ) lowercase__ = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(__lowercase ): if batch_text_pair is not None: lowercase__ = batch_text_pair[idx] else: lowercase__ = None lowercase__ = super().__call__(__lowercase, __lowercase, return_tensors=__lowercase, **__lowercase ) lowercase__ = encoded_candidates.get("input_ids" ) lowercase__ = encoded_candidates.get("attention_mask" ) lowercase__ = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(__lowercase ) if encoded_attention_mask is not None: 
output_data["attention_mask"].append(__lowercase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(__lowercase ) lowercase__ = {key: item for key, item in output_data.items() if len(__lowercase ) != 0} return BatchEncoding(__lowercase, tensor_type=__lowercase ) def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[str]=None ): lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A__ ( self : List[str], __lowercase : List[int], __lowercase : Optional[List[int]] = None ): lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A__ ( self : Any, __lowercase : str, __lowercase : Optional[str] = None ): lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase ) return tuple(__lowercase )
37
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _snake_case(TestCase):
    """Repo-wide hygiene checks for the dataset scripts under ``./datasets``.

    NOTE(review): the obfuscated original derived from the undefined name
    ``lowercase__`` and defined every method as ``A__`` (later defs silently
    shadowed earlier ones, and the calls to ``self._no_encoding_on_file_open``
    / ``self._no_print_statements`` could never resolve).  The base class is
    restored to the imported-but-otherwise-unused ``unittest.TestCase`` and
    the method names are restored to match their call sites.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match if *filepath* contains an ``open(...)`` call
        without an explicit encoding/binary mode, else ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            # Negative lookahead rejects lines mentioning encoding/binary modes.
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first *real* ``print(`` call in *filepath* (ignoring
        prints inside comments and string literals), or ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups
            # would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
37
1
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a scheduler ``step``.

    NOTE(review): the obfuscated original named this class ``_snake_case``
    and its fields ``UpperCamelCase__``; the keyword arguments at the
    ``KarrasVeOutput(...)`` construction sites below pin the real names.

    Attributes:
        prev_sample: sample for the previous timestep.
        derivative: derivative term reused by the corrector step.
        pred_original_sample: optional predicted denoised sample.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class _snake_case(SchedulerMixin, ConfigMixin):
    """Stochastic second-order sampler (Karras et al. 2022, Algorithm 2).

    NOTE(review): in the obfuscated original every method was named ``A__``
    (so later defs shadowed earlier ones) and the bases were the undefined
    name ``lowercase__``.  Bases are restored to the imported
    ``SchedulerMixin``/``ConfigMixin`` and method names to the standard
    diffusers scheduler API — confirm against the upstream scheduler.
    """

    # NOTE(review): attribute name reconstructed (obfuscated to
    # ``UpperCamelCase__``); presumably the solver order — TODO confirm.
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        # (attribute name reconstructed — TODO confirm `init_noise_sigma`)
        self.init_noise_sigma = sigma_max

        # setable values, populated by `set_timesteps`
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity — this scheduler requires no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps and the geometric sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # sigma_i = sigma_max * (sigma_min / sigma_max) ** (i / (N - 1))
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        # NOTE(review): obfuscated source said `torch.floataa`, which does not
        # exist; restored to `torch.float32` (digit-mangled name).
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """'Churn' step: raise sigma to sigma_hat by adding gamma-scaled
        noise to the sample (Alg. 2, lines 5-6)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """One Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun-like) correction of the step to sigma_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Not supported by this scheduler.
        raise NotImplementedError()
37
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): the obfuscated original bound both the dict and the
# modeling-name list to the same name (`lowercase_`), clobbering the dict,
# and then passed the undefined name `_import_structure` to `_LazyModule`.
# Restored to the standard transformers lazy-init pattern.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy torch imports only
    # happen on first attribute access.  NOTE(review): the obfuscated
    # original bound the proxy to a throwaway name, which left `import sys`
    # dead and the lazy mechanism inert.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
from typing import Any


class Node:
    """A single element of a singly linked list."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # reference to the next node; None when this is the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list with indexing, positional insert/delete and
    in-place reversal.

    NOTE(review): in the obfuscated original both classes were named
    ``_snake_case`` and every method was named ``A__``, so later definitions
    shadowed earlier ones and the call sites below (``insert_nth``,
    ``delete_head``, ``reverse``, ...) could not resolve; several distinct
    locals were also collapsed to one name, leaving ``temp``/``prev``
    referenced but never bound.  Names are restored to match the call sites
    in the self-tests.
    """

    def __init__(self):
        self.head = None

    def __iter__(self):
        # Yields node *payloads*, not Node objects.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        """Insert *data* so that it ends up at position *index* (0..len)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the payload at position *index*."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node

        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the basic list operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the list with heterogeneous payloads (ints, strings, floats,
    Node instances, and None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    """Interactive demo; NOTE(review): the obfuscated original's __main__
    guard called `main()` although no such function was defined."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
37
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # NOTE(review): the obfuscated original bound this to a throwaway name;
    # the skipIf decorators below read `is_torch_greater_or_equal_than_1_11`.
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import PixaStructImageProcessor


class PixaStructImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and sample inputs for the tests below.

    NOTE(review): the obfuscated original named this class ``_snake_case``
    while the test classes instantiate ``PixaStructImageProcessingTester`` —
    restored to match those call sites, as are the method names
    (``prepare_image_processor_dict`` / ``prepare_dummy_image``), which were
    all collapsed to ``A__``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # Class under test.  NOTE(review): the obfuscated original stored this in
    # `UpperCamelCase__` while every method reads `self.image_processing_class`;
    # the undefined base `lowercase__` is restored to the imported (and
    # otherwise unused) ImageProcessingSavingTestMixin.  Method names were all
    # `A__` (so unittest would have discovered nothing) — restored to test_*.
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        # NOTE(review): the obfuscated original had a bare `lowercase__ = True`
        # here; reconstructed as enabling VQA mode on the processor — confirm.
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Without header_text, VQA mode must reject the call.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    # NOTE(review): in the obfuscated original both test classes were named
    # `_snake_case`, so this one silently replaced the previous one at module
    # level — renamed to coexist.
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        # NOTE(review): attribute name reconstructed from a bare
        # `lowercase__ = 3` — presumably channels after RGB conversion; confirm.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: alpha channel is dropped by RGB conversion.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
37
1
def solution() -> int:
    """Project Euler 40: product of the digits d1 * d10 * d100 * ... * d1000000
    of Champernowne's constant 0.123456789101112...

    NOTE(review): the obfuscated original defined this as ``__lowerCAmelCase``
    while the ``__main__`` guard called the undefined name ``solution()`` —
    the definition is renamed to match the call site.  The stop condition now
    counts accumulated *digits* instead of appended *numbers* (the original
    built ~5.9M digits where 1M suffice); the returned value is unchanged
    because every index used is below 1,000,000.
    """
    pieces: list[str] = []
    total_digits = 0
    number = 1
    while total_digits < 1_000_000:
        piece = str(number)
        pieces.append(piece)
        total_digits += len(piece)
        number += 1
    constant = "".join(pieces)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
37
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of *grid*,
    moving in the four cardinal directions, never revisiting a cell, and never
    stepping on a cell containing 1 (an obstacle).

    NOTE(review): the obfuscated original defined this function as
    ``__lowerCAmelCase`` while its own body recursed via
    ``depth_first_search`` — a guaranteed NameError on the first recursive
    call.  The definition is renamed to match the call sites.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target corner: exactly one path ends here.
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so this cell can appear on other paths.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
from sklearn.metrics import mean_squared_error

import datasets


# NOTE(review): the obfuscated original bound all three docstring constants
# to the same name (`lowercase_`, each overwriting the last) while the
# decorator below references `_DESCRIPTION`/`_KWARGS_DESCRIPTION` and `_info`
# references `_CITATION`; the standard names are restored.
_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        \"raw_values\" : Returns a full set of errors in case of multioutput input.
        \"uniform_average\" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric(\"mse\")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Mean squared error metric backed by sklearn.

    NOTE(review): in the obfuscated original all three methods were named
    ``A__``, so later defs shadowed earlier ones; names are restored to the
    ``datasets.Metric`` API (``_info``/``_compute``) and to the internal call
    site (``self._get_feature_types()``).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config accepts multi-dimensional inputs.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        # MSE is symmetric in its first two arguments, so the exact argument
        # order does not affect the result.
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
37
def __lowerCAmelCase(input_str: str) -> bool:
    """Return True iff *input_str* contains no repeated character, using a
    big-int bitmap indexed by Unicode code point.

    NOTE(review): the obfuscated original computed ``pow(2, input_str)`` — a
    TypeError on any non-empty input — where the surrounding logic clearly
    needs the bit for the *current character*, ``pow(2, ch_unicode)``;
    restored.
    """
    bitmap = 0  # bit i set <=> code point i already seen
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


# Export the helper despite its leading underscores so `from module import *`
# (used by external smoke tests) can see it.
__all__ = ["__lowerCAmelCase"]

if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    """patch_submodule must patch every access path to os.path.join and leave siblings alone."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    """Patching a builtin that IS present in the target module's globals ("open")."""
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    """Patching an attribute of a module the target never imported must be a no-op, not an error."""
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    """Builtins should be patchable even when absent from the module's globals."""
    # builtins should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    """The patch object supports explicit start()/stop() in addition to the context-manager form."""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    """Nested patches over the same module must compose in any order and fully unwind."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    """Patching a dotted path whose module/attribute doesn't exist must not raise."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
37
def exchange_sort(numbers):
    """Sort *numbers* in place in ascending order using exchange sort.

    Exchange sort compares each element with every element after it and
    swaps out-of-order pairs — O(n^2), acceptable for the small interactive
    inputs this script is written for.

    The name ``exchange_sort`` is grounded in the original ``__main__``
    block, which calls it; the obfuscated definition was unreachable.

    :param numbers: mutable sequence of comparable items (mutated in place)
    :return: the same sequence object, sorted ascending
    """
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
37
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): restored distinct module-level names — the obfuscated source bound
# both the logger and this map to the same name, clobbering the logger.
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class _snake_case(PretrainedConfig):
    """Configuration for XGLM decoder-only transformer models.

    NOTE(review): upstream this class is ``XGLMConfig``; the obfuscated name is
    kept so any in-file callers keep working. The base class and the class
    attributes consumed by ``PretrainedConfig`` (``model_type``,
    ``keys_to_ignore_at_inference``, ``attribute_map``) are restored — the
    obfuscated source inherited from an undefined name and assigned three
    distinct attributes to one identifier.
    """

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Build the config; parameter names are grounded in the original body's
        right-hand sides (``self.x = x`` assignments and the ``super().__init__``
        keyword arguments)."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
37
def catalan_numbers(upper_limit):
    """Return the Catalan numbers C(0)..C(upper_limit) as a list.

    Uses bottom-up dynamic programming with the recurrence
    C(i) = sum(C(j) * C(i - j - 1) for j in 0..i-1).

    The name ``catalan_numbers`` and the ``N`` binding below are grounded in
    the original ``__main__`` block, which referenced both; the obfuscated
    definition left them undefined.

    :param upper_limit: largest index to compute (inclusive)
    :return: list of length ``upper_limit + 1``
    :raises ValueError: if ``upper_limit`` is negative
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
37
1
def __lowerCAmelCase(a, b):
    """Return the bitwise XOR of two non-negative ints as a "0b"-prefixed binary string.

    Both operands are rendered in binary, zero-padded to a common width, and
    XOR'd digit by digit, so the result keeps the width of the wider operand
    (e.g. 10 ^ 10 -> "0b0000").

    The obfuscated original declared both parameters with the same name
    (a SyntaxError); ``a``/``b`` are grounded in the body's
    ``if a < 0 or b < 0`` check.

    :param a: first non-negative integer
    :param b: second non-negative integer
    :raises ValueError: if either operand is negative
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public symbols it provides.
# Restored the ``_import_structure`` binding — the obfuscated source assigned the
# backend-specific lists to a throwaway name, so they were never registered, and
# the final _LazyModule call referenced ``_import_structure`` undefined.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Under static type checking, import everything eagerly so checkers see real symbols.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so submodules are only imported on first attribute access.
    # The obfuscated source discarded the _LazyModule into a local, never wiring it up.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Return True if *module* is a ``torch.compile`` OptimizedModule wrapper.

    Guarded for torch < 2.0, where ``torch._dynamo`` does not exist.
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Unwrap *model* from DDP/DataParallel/DeepSpeed/compile wrappers.

    Restored from the obfuscated source, whose collapsed locals left
    ``compiled_model``/``forward``/``original_forward`` undefined.

    :param model: possibly-wrapped model
    :param keep_fp32_wrapper: when False, also strip the mixed-precision
        forward wrapper recorded in ``_original_forward``
    :return: the innermost model (re-attached to the compile wrapper if any)
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        # Keep the compile wrapper aside; unwrap the module it points at.
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Walk down the decorator chain until we hit the recorded original.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        # Re-point the compile wrapper at the unwrapped module and return the wrapper.
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Block until every process in the current distributed setup reaches this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save *obj* to *f*, on the main process only (via ``xm.save`` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables for the ``with`` block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable name for *obj*: qualname, name, or ``str(obj)`` as fallback."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge *source* into *destination* (in place) and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Return True if localhost *port* (default 29500) currently accepts connections."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
37
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BERT *model* to a TF1 checkpoint under *ckpt_dir*.

    Restored from the obfuscated source: ``tensors_to_transpose``/``var_map``
    and the helpers' locals had been collapsed into one name, leaving later
    references undefined, and this function's name is grounded in the call
    inside ``main``.

    :param model: a ``BertModel`` instance whose ``state_dict`` is exported
    :param ckpt_dir: output directory (created if missing)
    :param model_name: checkpoint file stem; "-" is replaced by "_"
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pattern, replacement) pairs mapping PyTorch parameter names to TF variable names.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every rename rule in order, then namespace under "bert/".
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable matching the torch tensor's shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                # TF dense kernels are stored transposed relative to PyTorch.
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it to TF1."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
37
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): restored distinct module-level names — the obfuscated source bound
# both the logger and this map to the same name, clobbering the logger.
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _snake_case(PretrainedConfig):
    """Configuration for the Donut Swin vision encoder.

    NOTE(review): upstream this class is ``DonutSwinConfig``; the obfuscated name
    is kept so any in-file callers keep working. The base class and the class
    attributes consumed by ``PretrainedConfig`` (``model_type``,
    ``attribute_map``) are restored — the obfuscated source inherited from an
    undefined name and assigned both attributes to one identifier.
    """

    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        """Build the config; parameter names are grounded in the original body's
        right-hand sides (``self.x = x`` assignments)."""
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
37
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public symbols it provides.
# Restored the ``_import_structure`` binding — the obfuscated source assigned the
# torch-only model list to a throwaway name, and the final _LazyModule call
# referenced ``_import_structure`` undefined.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Under static type checking, import everything eagerly so checkers see real symbols.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


# NOTE(review): restored the distinct module-level names — the class body below
# references VOCAB_FILES_NAMES etc., which the obfuscated source left undefined
# by binding everything to a single name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character mapping for byte-level BPE.

    Every possible byte value maps to a distinct character that survives
    round-tripping through text, avoiding the BPE's control/whitespace tokens.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    # Bytes not already printable get mapped to characters above U+0100.
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class _snake_case(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style).

    NOTE(review): upstream this class is ``BartTokenizer``; the obfuscated name
    is kept so any in-file callers keep working. The class attributes and
    method names are restored to the ``PreTrainedTokenizer`` contract — the
    obfuscated source named every method ``A__``, so none of these overrides
    would ever be invoked by the base class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Number of tokens in the base vocabulary (added tokens excluded)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to *token* and return the space-joined result (cached)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize *text* into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unknown token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join *tokens* and decode the byte-level mapping back to a plain string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory* and return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Restored ``lambda kv: kv[1]`` — the obfuscated source closed over an
            # undefined name here, which would raise NameError on the first merge.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add BART special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask with 1 marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (BART does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the byte-level BPE treats the first word like the rest."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
37
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowercase_ = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ): if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE_ ) lowercase__ = True lowercase__ = True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ = model_class(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ = cached_file( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if compare_with_pt_model: lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" ) lowercase__ = pt_model_class.from_pretrained( pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): lowercase__ = pt_model(**pt_model.dummy_inputs ) lowercase__ = pto[0].numpy() lowercase__ = 
tfo[0].numpy() lowercase__ = np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format="h5" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ): if args_model_type is None: lowercase__ = list(MODEL_CLASSES.keys() ) else: lowercase__ = [args_model_type] for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ = model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in 
aws_config_map: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: lowercase__ = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: lowercase__ = model_shortcut_name if os.path.isfile(SCREAMING_SNAKE_CASE_ ): lowercase__ = "converted_model" convert_pt_checkpoint_to_tf( model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , ) if remove_cached_files: os.remove(SCREAMING_SNAKE_CASE_ ) os.remove(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") lowercase_ = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
37
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.17.0.dev0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""") lowercase_ = logging.getLogger(__name__) @dataclass class _snake_case : UpperCamelCase__ : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ : Optional[str] =field( default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , ) UpperCamelCase__ : int =field( default=1_0_2_4 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. 
""" """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) UpperCamelCase__ : Optional[int] =field( default=lowercase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ : Optional[int] =field( default=lowercase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ : Optional[int] =field( default=lowercase__ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """A csv or a json file containing the training data."""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """A csv or a json file containing the validation data."""}) UpperCamelCase__ : Optional[str] =field(default=lowercase__ , metadata={"""help""": """A csv or a json file containing the test data."""}) def A__ ( self : Any ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." ) else: lowercase__ = self.train_file.split("." )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowercase__ = self.validation_file.split("." )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class _snake_case : UpperCamelCase__ : str =field( default=lowercase__ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) UpperCamelCase__ : Optional[str] =field( default=lowercase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase__ : str =field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __lowerCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) lowercase__ = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE_ ) datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowercase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowercase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. lowercase__ = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: lowercase__ = data_args.train_file.split("." )[-1] lowercase__ = data_args.test_file.split("." )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." lowercase__ = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`." 
) for key in data_files.keys(): logger.info(f'''load a local file for {key}: {data_files[key]}''' ) if data_args.train_file.endswith(".csv" ): # Loading a dataset from local csv files lowercase__ = load_dataset("csv" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files lowercase__ = load_dataset("json" , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels lowercase__ = raw_datasets["train"].features["label"].names lowercase__ = len(SCREAMING_SNAKE_CASE_ ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer lowercase__ = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=SCREAMING_SNAKE_CASE_ , ) lowercase__ = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: lowercase__ = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence 
length in each batch lowercase__ = False # Some models have set the order of the labels to use, so let's make sure we do use it. lowercase__ = {"Refused": 0, "Entailed": 1} lowercase__ = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) lowercase__ = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(SCREAMING_SNAKE_CASE_ ): # Tokenize the texts def _convert_table_text_to_pandas(SCREAMING_SNAKE_CASE_ ): lowercase__ = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )] lowercase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd lowercase__ = examples["statement"] lowercase__ = list(map(_convert_table_text_to_pandas , examples["table_text"] ) ) lowercase__ = tokenizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ) lowercase__ = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing" ): lowercase__ = raw_datasets.map( SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) lowercase__ = raw_datasets["train"] if data_args.max_train_samples is not None: lowercase__ = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) lowercase__ = raw_datasets["validation"] if 
data_args.max_eval_samples is not None: lowercase__ = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset" ) lowercase__ = raw_datasets["test"] if data_args.max_predict_samples is not None: lowercase__ = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(SCREAMING_SNAKE_CASE_ ) ) , 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(SCREAMING_SNAKE_CASE_ ): lowercase__ = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE_ ) else p.predictions lowercase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: lowercase__ = default_data_collator elif training_args.fpaa: lowercase__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 ) else: lowercase__ = None # Initialize our Trainer lowercase__ = Trainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , ) # Training if training_args.do_train: lowercase__ = None if training_args.resume_from_checkpoint is not None: lowercase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase__ = last_checkpoint lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ ) lowercase__ = train_result.metrics lowercase__ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowercase__ = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE_ ) lowercase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ ) lowercase__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ ) trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ ) if training_args.do_predict: logger.info("*** Predict ***" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
lowercase__ = predict_dataset.remove_columns("label" ) lowercase__ = trainer.predict(SCREAMING_SNAKE_CASE_ , metric_key_prefix="predict" ).predictions lowercase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 ) lowercase__ = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer: logger.info("***** Predict Results *****" ) writer.write("index\tprediction\n" ) for index, item in enumerate(SCREAMING_SNAKE_CASE_ ): lowercase__ = label_list[item] writer.write(f'''{index}\t{item}\n''' ) lowercase__ = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
37
import math


def res(x: int, y: int) -> float:
    """Return log10(x ** y), a magnitude proxy for x raised to the power y.

    Using y * log10(x) lets us compare huge powers without computing them.
    Special cases: x == 0 -> 0 (0 to any power is 0, checked first so that
    0 ** 0 keeps the original behavior); y == 0 -> 1 (anything to the 0 is 1).
    """
    if 0 not in (x, y):
        # log10(x**y) == y * log10(x)
        return y * math.logaa(x) if hasattr(math, "logaa") else y * math.log10(x)
    if x == 0:
        return 0  # 0 raised to any number is 0
    return 1  # y == 0 here: any number raised to 0 is 1


if __name__ == "__main__":
    # BUG FIX: the dump unpacked both input pairs into the same clobbered
    # names and called an undefined function; restore distinct names so the
    # comparison is meaningful.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xa2, ya2 = map(int, input(prompt).split(","))
    # Compare the magnitudes of xa**ya and xa2**ya2 via their logs.
    resa = res(xa, ya)
    resa2 = res(xa2, ya2)
    if resa > resa2:
        print("Largest number is", xa, "^", ya)
    elif resa2 > resa:
        print("Largest number is", xa2, "^", ya2)
    else:
        print("Both are equal")
37
1
from math import ceil


def solution(n: int = 1001) -> int:
    """Project Euler 28: sum of the numbers on both diagonals of an
    n x n clockwise number spiral (n odd).

    Ring i (side length 2*i + 1) contributes its four corners, whose sum
    is 4*(2*i + 1)**2 - 6*(2*i); the centre cell contributes 1.
    """
    total = 1  # centre of the spiral
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # side length of ring i; odd**2 is its top-right corner
        even = 2 * i  # gap between consecutive corners on the ring
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    # BUG FIX: the dump defined the function under an obfuscated name but
    # called `solution` here; the name is restored so the script runs.
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
37
import pickle import numpy as np from matplotlib import pyplot as plt class _snake_case : def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ): lowercase__ = bp_numa lowercase__ = bp_numa lowercase__ = bp_numa lowercase__ = conva_get[:2] lowercase__ = conva_get[2] lowercase__ = size_pa lowercase__ = rate_w lowercase__ = rate_t lowercase__ = [ np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 ) lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 ) lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1 lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1 lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1 def A__ ( self : Any, __lowercase : List[str] ): # save model dict with pickle lowercase__ = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(__lowercase, "wb" ) as f: pickle.dump(__lowercase, __lowercase ) print(F'''Model saved: {save_path}''' ) @classmethod def A__ ( cls : Dict, __lowercase : Union[str, Any] ): # read saved model with open(__lowercase, "rb" ) as f: lowercase__ = pickle.load(__lowercase ) # noqa: S301 lowercase__ = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) lowercase__ = model_dic.get("size_pooling1" ) lowercase__ = model_dic.get("num_bp1" ) lowercase__ = model_dic.get("num_bp2" ) lowercase__ = model_dic.get("num_bp3" ) lowercase__ = model_dic.get("rate_weight" ) lowercase__ = 
model_dic.get("rate_thre" ) # create model instance lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase ) # modify model parameter lowercase__ = model_dic.get("w_conv1" ) lowercase__ = model_dic.get("wkj" ) lowercase__ = model_dic.get("vji" ) lowercase__ = model_dic.get("thre_conv1" ) lowercase__ = model_dic.get("thre_bp2" ) lowercase__ = model_dic.get("thre_bp3" ) return conv_ins def A__ ( self : str, __lowercase : List[Any] ): return 1 / (1 + np.exp(-1 * x )) def A__ ( self : List[str], __lowercase : Optional[Any] ): return round(__lowercase, 3 ) def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ): # convolution process lowercase__ = convs[0] lowercase__ = convs[1] lowercase__ = np.shape(__lowercase )[0] # get the data slice of original image data, data_focus lowercase__ = [] for i_focus in range(0, size_data - size_conv + 1, __lowercase ): for j_focus in range(0, size_data - size_conv + 1, __lowercase ): lowercase__ = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__lowercase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase__ = [] lowercase__ = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__lowercase ): lowercase__ = [] for i_focus in range(len(__lowercase ) ): lowercase__ = ( np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__lowercase ) ) lowercase__ = np.asmatrix(__lowercase ).reshape( __lowercase, __lowercase ) data_featuremap.append(__lowercase ) # expanding the data slice to One dimenssion lowercase__ = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__lowercase ) ) lowercase__ = np.asarray(__lowercase ) return focus_list, data_featuremap def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], 
__lowercase : Union[str, Any]="average_pool" ): # pooling process lowercase__ = len(featuremaps[0] ) lowercase__ = int(size_map / size_pooling ) lowercase__ = [] for i_map in range(len(__lowercase ) ): lowercase__ = featuremaps[i_map] lowercase__ = [] for i_focus in range(0, __lowercase, __lowercase ): for j_focus in range(0, __lowercase, __lowercase ): lowercase__ = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__lowercase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__lowercase ) ) lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase ) featuremap_pooled.append(__lowercase ) return featuremap_pooled def A__ ( self : str, __lowercase : Optional[Any] ): # expanding three dimension data to one dimension list lowercase__ = [] for i in range(len(__lowercase ) ): lowercase__ = np.shape(data[i] ) lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] ) lowercase__ = data_listed.getA().tolist()[0] data_expanded.extend(__lowercase ) lowercase__ = np.asarray(__lowercase ) return data_expanded def A__ ( self : Optional[int], __lowercase : Optional[int] ): # expanding matrix to one dimension list lowercase__ = np.asarray(__lowercase ) lowercase__ = np.shape(__lowercase ) lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] ) return data_expanded def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ): lowercase__ = [] lowercase__ = 0 for i_map in range(__lowercase ): lowercase__ = np.ones((size_map, size_map) ) for i in range(0, __lowercase, __lowercase ): for j in range(0, __lowercase, __lowercase ): lowercase__ = pd_pool[ i_pool ] lowercase__ = i_pool + 1 lowercase__ = np.multiply( __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) ) pd_all.append(__lowercase ) return pd_all def A__ ( self : Tuple, 
__lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ): # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(__lowercase )) ) print((" - - Shape: Teach_Data ", np.shape(__lowercase )) ) lowercase__ = 0 lowercase__ = [] lowercase__ = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase__ = 0 print(F'''-------------Learning Time {rp}--------------''' ) for p in range(len(__lowercase ) ): # print('------------Learning Image: %d--------------'%p) lowercase__ = np.asmatrix(datas_train[p] ) lowercase__ = np.asarray(datas_teach[p] ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) lowercase__ = np.shape(__lowercase ) lowercase__ = self._expand(__lowercase ) lowercase__ = data_bp_input lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa lowercase__ = self.sig(__lowercase ) lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa lowercase__ = self.sig(__lowercase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase__ = np.multiply( (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) ) lowercase__ = np.multiply( np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) ) lowercase__ = np.dot(__lowercase, self.vji ) lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase__ = pd_conva_pooled.T.getA().tolist() lowercase__ = self._calculate_gradient_from_pool( __lowercase, __lowercase, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase__ = self._expand_mat(pd_conva_all[k_conv] ) lowercase__ = 
self.rate_weight * np.dot(__lowercase, __lowercase ) lowercase__ = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase__ = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase__ = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase__ = rp + 1 lowercase__ = error_count / patterns all_mse.append(__lowercase ) def draw_error(): lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__lowercase, "+-" ) plt.plot(__lowercase, "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(__lowercase, alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def A__ ( self : List[str], __lowercase : Optional[int] ): # model predict lowercase__ = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(__lowercase )) ) for p in range(len(__lowercase ) ): lowercase__ = np.asmatrix(datas_test[p] ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) lowercase__ = self._expand(__lowercase ) lowercase__ = data_bp_input lowercase__ = bp_outa * self.vji.T - self.thre_bpa lowercase__ = self.sig(__lowercase ) lowercase__ = bp_outa * self.wkj.T - self.thre_bpa lowercase__ = self.sig(__lowercase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase__ = [list(map(self.do_round, 
__lowercase ) ) for each in produce_out] return np.asarray(__lowercase ) def A__ ( self : int, __lowercase : Any ): # return the data of image after convoluting process so we can check it out lowercase__ = np.asmatrix(__lowercase ) lowercase__ , lowercase__ = self.convolute( __lowercase, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, ) lowercase__ = self.pooling(__lowercase, self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
37
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowercase_ = { """configuration_bridgetower""": [ """BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BridgeTowerConfig""", """BridgeTowerTextConfig""", """BridgeTowerVisionConfig""", ], """processing_bridgetower""": ["""BridgeTowerProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""BridgeTowerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""", """BridgeTowerForContrastiveLearning""", """BridgeTowerForImageAndTextRetrieval""", """BridgeTowerForMaskedLM""", """BridgeTowerModel""", """BridgeTowerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
37
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = "huggingface/label-files" lowercase__ = "imagenet-1k-id2label.json" lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" lowercase__ = BitConfig( conv_layer=SCREAMING_SNAKE_CASE_ , num_labels=1000 , idalabel=SCREAMING_SNAKE_CASE_ , labelaid=SCREAMING_SNAKE_CASE_ , ) return config def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if "stem.conv" in name: lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: lowercase__ = name.replace("blocks" , "layers" ) if "head.fc" in name: lowercase__ = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): lowercase__ = "bit." + name if "bit" not in name and "classifier" not in name: lowercase__ = "bit.encoder." 
+ name return name def __lowerCAmelCase ( ): lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase__ = get_config(SCREAMING_SNAKE_CASE_ ) # load original model from timm lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ) timm_model.eval() # load state_dict of original model lowercase__ = timm_model.state_dict() for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ ) lowercase__ = val.squeeze() if "head" in key else val # load HuggingFace model lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # create image processor lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) ) lowercase__ = transform.transforms lowercase__ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } lowercase__ = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) lowercase__ = prepare_img() lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # verify logits with torch.no_grad(): lowercase__ = model(SCREAMING_SNAKE_CASE_ ) lowercase__ = 
outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) lowercase_ = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""", datefmt="""%Y-%m-%d %H:%M:%S""", level=os.environ.get("""LOGLEVEL""", """INFO""").upper(), stream=sys.stdout, ) lowercase_ = logging.getLogger(__name__) lowercase_ = {"""facebook/bart-base""": BartForConditionalGeneration} lowercase_ = {"""facebook/bart-base""": BartTokenizer} def __lowerCAmelCase ( ): lowercase__ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=SCREAMING_SNAKE_CASE_ , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=SCREAMING_SNAKE_CASE_ , help="Path to pretrained model or model identifier from huggingface.co/models." 
, required=SCREAMING_SNAKE_CASE_ , ) parser.add_argument( "--config_name" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=SCREAMING_SNAKE_CASE_ , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="Where to store the final ONNX file." ) lowercase__ = parser.parse_args() return args def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="cpu" ): lowercase__ = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) lowercase__ = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE_ ) if model_name in ["facebook/bart-base"]: lowercase__ = 0 lowercase__ = None lowercase__ = 0 return huggingface_model, tokenizer def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): model.eval() lowercase__ = None lowercase__ = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE_ ) ) with torch.no_grad(): lowercase__ = "My friends are cool but they eat too many carbs." 
lowercase__ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device ) lowercase__ = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , early_stopping=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE_ , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE_ , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=SCREAMING_SNAKE_CASE_ , ) logger.info("Model exported to {}".format(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE_ ) ) logger.info("Deduplicated and optimized model written to {}".format(SCREAMING_SNAKE_CASE_ ) ) lowercase__ = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE_ ) lowercase__ = ort_sess.run( SCREAMING_SNAKE_CASE_ , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(SCREAMING_SNAKE_CASE_ ), "max_length": np.array(SCREAMING_SNAKE_CASE_ ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def __lowerCAmelCase ( ): lowercase__ = parse_args() lowercase__ = 5 lowercase__ = 4 # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() lowercase__ = torch.device(args.device ) lowercase__ , lowercase__ = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE_ ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(SCREAMING_SNAKE_CASE_ ) if args.max_length: lowercase__ = args.max_length if args.num_beams: lowercase__ = args.num_beams if args.output_file_path: lowercase__ = args.output_file_path else: lowercase__ = "BART.onnx" logger.info("Exporting model to ONNX" ) export_and_validate_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": main()
37
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _snake_case ( lowercase__): def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ): lowercase__ = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } lowercase__ = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowercase__ = token_dict["token"] lowercase__ = Tokenizer(Unigram() ) lowercase__ = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ), " " ), normalizers.Lowercase(), ] ) lowercase__ = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ), pre_tokenizers.Digits(individual_digits=__lowercase ), pre_tokenizers.Punctuation(), ] ) lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ) lowercase__ = TemplateProcessing( single=F'''$A {self.special_tokens["eos"]["token"]}''', special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], ) lowercase__ = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(__lowercase, __lowercase ) def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) if isinstance(__lowercase, __lowercase ): lowercase__ = [files] 
self._tokenizer.train(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ): lowercase__ = trainers.UnigramTrainer( vocab_size=__lowercase, special_tokens=self.special_tokens_list, show_progress=__lowercase, ) self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase ) self.add_unk_id() def A__ ( self : str ): lowercase__ = json.loads(self._tokenizer.to_str() ) lowercase__ = self.special_tokens["unk"]["id"] lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
37
1
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)

logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on one shard of the dataset and save predictions to
    ``save_dir/rank_{local_rank}_output.json``.

    Returns the list of prediction records and the number of replicas
    participating in the distributed run.
    """
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            # batch size chunks, each of size num_return_seq
            preds = chunks(preds, num_return_sequences)
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    """Parse CLI args, run sharded evaluation, and (on rank 0) aggregate results
    from all nodes, compute BLEU/ROUGE, and write metrics + generations."""
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
        # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, sort by record id, and keep
    only the prediction strings."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    """Poll save_dir until all replicas' rank_*.json files are present and
    parseable, or raise TimeoutError after `timeout` seconds."""
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
37
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate a README.md model card for a facebook/wmt19-{src}-{tgt} FSMT
    model and write it into `model_card_dir`.

    Creates the directory if it does not exist.
    """
    # Example sentences used in the "How to use" section, keyed by language.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


if __name__ == "__main__":
    # make sure we are under the root of the project
    repo_dir = Path(__file__).resolve().parent.parent.parent
    model_cards_dir = repo_dir / "model_cards"

    for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
        base, src_lang, tgt_lang = model_name.split("-")
        model_card_dir = model_cards_dir / "facebook" / model_name
        write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
1
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an OrderedDict mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocab."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocab substrings; unknown characters
        become `unk_token`. Overlong inputs collapse to a single unk."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab match is found.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba word segmentation followed by wordpiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the literal space/newline characters onto the dedicated
        # space/line vocab entries, then drop the originals.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids after stripping negatives and pad/eos/bos markers."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the on-disk token names for space and newline before writing.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend BOS to each sequence (and join a pair with a second BOS)."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mask marking special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
37
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for TransfoXLTokenizer.

    Note: each test method needs a distinct name — identically named methods
    silently shadow one another at class creation, leaving only the last.
    """

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
37
1
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download and return the demo image used to verify the conversion."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Pop `old` from dct and re-insert its value under `new`."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Fold the separate q/v biases into a single qkv bias per vision layer
    (k has no bias, hence the zero block in the middle)."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name):
    """Build the HF InstructBlipConfig matching the LAVIS checkpoint name.

    Returns (config, image_size); COCO-finetuned checkpoints use 364px inputs.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the LAVIS InstructBLIP weights into the HF design, verify
    logits and generation against the original model, then optionally save
    and/or push the converted model to the hub.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Entry point for the TensorFlow benchmark CLI.

    Parses :class:`TensorFlowBenchmarkArguments` from the command line,
    translating the legacy ``--no_*`` flag spelling into a helpful error
    message, then runs the benchmark.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        # Fix: the parse must happen INSIDE the try, otherwise the
        # deprecation handler below is dead code.
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # HfArgumentParser reports unknown arguments as
        # "... <message> ['--no_x', ...]"; the trailing token is a
        # Python-literal list of the unrecognized flag strings.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


# Backward-compatible alias for the previous (mangled) public name.
__lowerCAmelCase = main


if __name__ == "__main__":
    main()
37
1
def text_justification(word: str, max_width: int) -> list:
    """Greedily fully-justify the sentence *word* into lines of width *max_width*.

    Words are packed left-to-right.  Each full line distributes its leftover
    spaces as evenly as possible between words, giving the extra spaces to the
    leftmost gaps; the final line is left-justified and padded with trailing
    spaces.  (Fixes the original, which declared both parameters with the same
    name — a SyntaxError — and read undefined identifiers in the body.)

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        """Pad one full line of words out to exactly *max_width* columns."""
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # A single word: everything left over becomes trailing padding.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # Base spaces per gap, then one extra for the leftmost `remainder` gaps.
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for current_word in words:
        # len(line) counts the minimum single spaces needed between words.
        if width + len(current_word) + len(line) <= max_width:
            line.append(current_word)
            width += len(current_word)
        else:
            # Current word does not fit: flush the accumulated line, justified.
            answer.append(justify(line, width, max_width))
            line, width = [current_word], len(current_word)
    # Last line is left-justified: single spaces, then trailing padding.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


# Backward-compatible alias for the previous (mangled) public name.
__lowerCAmelCase = text_justification


if __name__ == "__main__":
    from doctest import testmod

    testmod()
37
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


# NOTE(review): this module appears machine-mangled.  All module constants were
# renamed to `lowercase_` (so each assignment shadows the previous one — only
# the last survives), assignment targets inside functions were collapsed to
# `lowercase__` while later reads still use the original names, and every
# method is named `A__`.  The comments below describe the apparent intent;
# the code itself is left byte-identical.

# Conflict-marker-style banner prepended to lines that mention TFDS-only APIs.
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
# Closing half of the conflict-marker banner.
lowercase_ = """=======
>>>>>>>
"""
# TFDS identifiers that require manual attention after conversion.
lowercase_ = [
    """TextEncoderConfig""",
    """ByteTextEncoder""",
    """SubwordTextEncoder""",
    """encoder_config""",
    """maybe_build_from_corpus""",
    """manual_dir""",
]
# Regex rewrites applied line-by-line to port TFDS scripts to `datasets`.
lowercase_ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"""tfds\.core""", r"""datasets"""),
    (r"""tf\.io\.gfile\.GFile""", r"""open"""),
    (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
    (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
    (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
    (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
    (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
    (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
    (r"""tfds\.""", r"""datasets."""),
    (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
    (r"""self\.builder_config""", r"""self.config"""),
]


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_):
    # Factory used by argparse `set_defaults(func=...)`.
    # NOTE(review): the parameter is named SCREAMING_SNAKE_CASE_ but the body
    # reads `args`, and `ConvertCommand` is not defined in this module (the
    # class below is `_snake_case`) — both are NameErrors as written.
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class _snake_case(lowercase__):
    """CLI command that converts TensorFlow Datasets scripts to HF `datasets` scripts.

    NOTE(review): the base class `lowercase__` is undefined at module scope —
    presumably `BaseDatasetsCLICommand` (imported above); TODO confirm.
    """

    @staticmethod
    def A__(__lowercase : ArgumentParser):
        # Register the `convert` subcommand and its arguments.
        # NOTE(review): the parameter is `__lowercase` but the body reads
        # `parser`/`train_parser`, and `set_defaults(func=__lowercase)` wires
        # the parser object itself as the callback instead of the factory
        # function above — both look like obfuscation damage.
        lowercase__ = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=__lowercase,
            required=__lowercase,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=__lowercase)

    def __init__(self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple):
        # NOTE(review): three parameters share the name `__lowercase` — this is
        # a SyntaxError as written (originally tfds_path, datasets_directory).
        lowercase__ = get_logger("datasets-cli/converting")
        lowercase__ = tfds_path
        lowercase__ = datasets_directory

    def A__(self : Any):
        # Main conversion pass: walk the TFDS scripts, rewrite them line by
        # line with TO_CONVERT, collect utility files, and flag scripts that
        # need manual follow-up.
        if os.path.isdir(self._tfds_path):
            lowercase__ = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            lowercase__ = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        lowercase__ = os.path.abspath(self._datasets_directory)

        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''')
        # utils_files / with_manual_update / imports_to_builder_map
        lowercase__ = []
        lowercase__ = []
        lowercase__ = {}

        if os.path.isdir(self._tfds_path):
            lowercase__ = os.listdir(__lowercase)
        else:
            lowercase__ = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''')
            lowercase__ = os.path.join(__lowercase, __lowercase)
            lowercase__ = os.path.join(__lowercase, __lowercase)

            # Only convert real dataset scripts, skip package/test modules.
            if not os.path.isfile(__lowercase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(__lowercase, encoding="utf-8") as f:
                lowercase__ = f.readlines()

            # out_lines / is_builder / needs_manual_update / tfds_imports
            lowercase__ = []
            lowercase__ = False
            lowercase__ = False
            lowercase__ = []
            for line in lines:
                lowercase__ = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    lowercase__ = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    lowercase__ = ""
                    continue
                elif "from absl import logging" in out_line:
                    lowercase__ = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    lowercase__ = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Wrap the line in conflict markers so a human reviews it.
                    # NOTE(review): the lambda below takes `__lowercase` but its
                    # body reads `e` — NameError if reached; the two trailing
                    # appends also both append `__lowercase` where the original
                    # appended the line and then the closing banner.
                    lowercase__ = True
                    lowercase__ = list(filter(lambda __lowercase: e in out_line, __lowercase))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase) + "\n")
                    out_lines.append(__lowercase)
                    out_lines.append(__lowercase)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        lowercase__ = re.sub(__lowercase, __lowercase, __lowercase)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    lowercase__ = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''')

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    lowercase__ = True
                out_lines.append(__lowercase)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                lowercase__ = f_name.replace(".py", "")
                lowercase__ = os.path.join(__lowercase, __lowercase)
                lowercase__ = os.path.join(__lowercase, __lowercase)
                os.makedirs(__lowercase, exist_ok=__lowercase)
                self._logger.info(F'''Adding directory {output_dir}''')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(__lowercase)

            if needs_manual_update:
                with_manual_update.append(__lowercase)

            with open(__lowercase, "w", encoding="utf-8") as f:
                f.writelines(__lowercase)
            self._logger.info(F'''Converted in {output_file}''')

        for utils_file in utils_files:
            try:
                lowercase__ = os.path.basename(__lowercase)
                lowercase__ = imports_to_builder_map[f_name.replace(".py", "")]
                # NOTE(review): message direction looks inverted — the copy
                # moves utils_file INTO dest_folder.
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''')
                shutil.copy(__lowercase, __lowercase)
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''')

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.'''
                )
37
1
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class _snake_case(lowercase__):
    """Test suite for `PNDMScheduler`: save/load round-trips, step-output shape
    checks, configuration sweeps, and end-to-end denoising-loop checksums.

    NOTE(review): this file is machine-obfuscated and broken as written:
      * the base class `lowercase__` is undefined at module scope (presumably
        `SchedulerCommonTest`, imported above) — TODO confirm;
      * both class attributes are named `UpperCamelCase__`, so the second
        assignment shadows the first;
      * every method is named `A__`, so only the last definition survives;
      * several signatures repeat the parameter name `__lowercase`, which is a
        SyntaxError;
      * assignment targets were collapsed to `lowercase__` while later reads
        use the original names (`scheduler`, `sample`, `kwargs`, ...).
    The code is left byte-identical; comments describe the apparent intent.
    """

    UpperCamelCase__ : List[Any] = (PNDMScheduler,)
    UpperCamelCase__ : List[str] = (("""num_inference_steps""", 5_0),)

    def A__(self : Dict, **__lowercase : Dict):
        # Build the default scheduler config and overlay keyword overrides.
        lowercase__ = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**__lowercase)
        return config

    def A__(self : Optional[int], __lowercase : Optional[Any]=0, **__lowercase : Optional[int]):
        # Round-trip: save config, reload, and assert step_prk/step_plms agree.
        lowercase__ = dict(self.forward_default_kwargs)
        lowercase__ = kwargs.pop("num_inference_steps", __lowercase)
        lowercase__ = self.dummy_sample
        lowercase__ = 0.1 * sample
        lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config(**__lowercase)
            lowercase__ = scheduler_class(**__lowercase)
            scheduler.set_timesteps(__lowercase)
            # copy over dummy past residuals
            lowercase__ = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowercase)
                lowercase__ = scheduler_class.from_pretrained(__lowercase)
                new_scheduler.set_timesteps(__lowercase)
                # copy over dummy past residuals
                lowercase__ = dummy_past_residuals[:]

            lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample
            lowercase__ = new_scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            lowercase__ = scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample
            lowercase__ = new_scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def A__(self : Optional[Any]):
        # Intentionally disabled base-class check.
        pass

    def A__(self : Union[str, Any], __lowercase : List[str]=0, **__lowercase : Any):
        # Same round-trip as above, but residuals are injected after
        # set_timesteps on both sides (from_save_pretrained variant).
        lowercase__ = dict(self.forward_default_kwargs)
        lowercase__ = kwargs.pop("num_inference_steps", __lowercase)
        lowercase__ = self.dummy_sample
        lowercase__ = 0.1 * sample
        lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase)
            scheduler.set_timesteps(__lowercase)

            # copy over dummy past residuals (must be after setting timesteps)
            lowercase__ = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowercase)
                lowercase__ = scheduler_class.from_pretrained(__lowercase)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowercase)

                # copy over dummy past residual (must be after setting timesteps)
                lowercase__ = dummy_past_residuals[:]

            lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample
            lowercase__ = new_scheduler.step_prk(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            lowercase__ = scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample
            lowercase__ = new_scheduler.step_plms(__lowercase, __lowercase, __lowercase, **__lowercase).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def A__(self : Dict, **__lowercase : Any):
        # Run the full PRK + PLMS denoising loop and return the final sample.
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(**__lowercase)
        lowercase__ = scheduler_class(**__lowercase)

        lowercase__ = 10
        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter
        scheduler.set_timesteps(__lowercase)

        for i, t in enumerate(scheduler.prk_timesteps):
            lowercase__ = model(__lowercase, __lowercase)
            lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            lowercase__ = model(__lowercase, __lowercase)
            lowercase__ = scheduler.step_plms(__lowercase, __lowercase, __lowercase).prev_sample

        return sample

    def A__(self : int):
        # step_prk / step_plms outputs must keep the input sample's shape.
        lowercase__ = dict(self.forward_default_kwargs)
        lowercase__ = kwargs.pop("num_inference_steps", __lowercase)
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase)

            lowercase__ = self.dummy_sample
            lowercase__ = 0.1 * sample

            if num_inference_steps is not None and hasattr(__lowercase, "set_timesteps"):
                scheduler.set_timesteps(__lowercase)
            elif num_inference_steps is not None and not hasattr(__lowercase, "set_timesteps"):
                lowercase__ = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            lowercase__ = dummy_past_residuals[:]

            lowercase__ = scheduler.step_prk(__lowercase, 0, __lowercase, **__lowercase).prev_sample
            lowercase__ = scheduler.step_prk(__lowercase, 1, __lowercase, **__lowercase).prev_sample

            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)

            lowercase__ = scheduler.step_plms(__lowercase, 0, __lowercase, **__lowercase).prev_sample
            lowercase__ = scheduler.step_plms(__lowercase, 1, __lowercase, **__lowercase).prev_sample

            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)

    def A__(self : str):
        # Sweep num_train_timesteps.
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=__lowercase)

    def A__(self : List[str]):
        # Sweep steps_offset and pin the exact timestep schedule for offset=1.
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__lowercase)

        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(steps_offset=1)
        lowercase__ = scheduler_class(**__lowercase)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def A__(self : str):
        # Sweep (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=__lowercase, beta_end=__lowercase)

    def A__(self : Tuple):
        # Sweep beta schedules.
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__lowercase)

    def A__(self : Union[str, Any]):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowercase)

    def A__(self : List[Any]):
        # Sweep individual time steps.
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=__lowercase)

    def A__(self : int):
        # Sweep (time step, num_inference_steps) pairs.
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=__lowercase)

    def A__(self : int):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        lowercase__ = 27

        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.dummy_sample
            lowercase__ = 0.1 * sample

            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase)

            scheduler.set_timesteps(__lowercase)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                lowercase__ = scheduler.step_prk(__lowercase, __lowercase, __lowercase).prev_sample

    def A__(self : List[str]):
        # Calling step_plms before set_timesteps must raise.
        # NOTE(review): `self.assertRaises(__lowercase)` reads a name that is
        # not in scope here (no parameters) — originally ValueError.
        with self.assertRaises(__lowercase):
            lowercase__ = self.scheduler_classes[0]
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**__lowercase)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def A__(self : List[str]):
        # Full-loop checksum with default config.
        lowercase__ = self.full_loop()
        lowercase__ = torch.sum(torch.abs(__lowercase))
        lowercase__ = torch.mean(torch.abs(__lowercase))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def A__(self : Any):
        # Full-loop checksum with v-prediction.
        lowercase__ = self.full_loop(prediction_type="v_prediction")
        lowercase__ = torch.sum(torch.abs(__lowercase))
        lowercase__ = torch.mean(torch.abs(__lowercase))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def A__(self : Optional[int]):
        # We specify different beta, so that the first alpha is 0.99
        lowercase__ = self.full_loop(set_alpha_to_one=__lowercase, beta_start=0.01)
        lowercase__ = torch.sum(torch.abs(__lowercase))
        lowercase__ = torch.mean(torch.abs(__lowercase))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def A__(self : List[str]):
        # We specify different beta, so that the first alpha is 0.99
        lowercase__ = self.full_loop(set_alpha_to_one=__lowercase, beta_start=0.01)
        lowercase__ = torch.sum(torch.abs(__lowercase))
        lowercase__ = torch.mean(torch.abs(__lowercase))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
37
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


lowercase_ = logging.get_logger(__name__)

# NOTE(review): every module constant below was renamed to `lowercase_`, so
# each assignment shadows the previous one; the class attributes then read
# VOCAB_FILES_NAMES / PRETRAINED_* names that no longer exist.  Code kept
# byte-identical; comments only.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}


class _snake_case(lowercase__):
    """Fast (Rust-backed) LED tokenizer.

    NOTE(review): this class is machine-obfuscated and broken as written:
      * the base class `lowercase__` is undefined (presumably
        `PreTrainedTokenizerFast`, imported above) — TODO confirm;
      * all five class attributes share the name `UpperCamelCase__`;
      * `__init__` and several methods declare multiple parameters named
        `__lowercase`, which is a SyntaxError;
      * every method is named `A__`, so `@mask_token.setter` references a
        `mask_token` property that was never bound under that name;
      * assignment targets were collapsed to `lowercase__` while later reads
        use the original names (`pre_tok_state`, `state`, `output`, ...).
    """

    UpperCamelCase__ : int = VOCAB_FILES_NAMES
    UpperCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] = LEDTokenizer
    UpperCamelCase__ : Tuple = ["""input_ids""", """attention_mask"""]

    def __init__(
        self : Optional[Any],
        __lowercase : Optional[Any]=None,
        __lowercase : Dict=None,
        __lowercase : Tuple=None,
        __lowercase : Union[str, Any]="replace",
        __lowercase : Tuple="<s>",
        __lowercase : Optional[Any]="</s>",
        __lowercase : Tuple="</s>",
        __lowercase : List[str]="<s>",
        __lowercase : Tuple="<unk>",
        __lowercase : Dict="<pad>",
        __lowercase : Dict="<mask>",
        __lowercase : Any=False,
        __lowercase : Any=True,
        **__lowercase : List[Any],
    ):
        # Forward everything to the fast-tokenizer base constructor.
        super().__init__(
            __lowercase,
            __lowercase,
            tokenizer_file=__lowercase,
            errors=__lowercase,
            bos_token=__lowercase,
            eos_token=__lowercase,
            sep_token=__lowercase,
            cls_token=__lowercase,
            unk_token=__lowercase,
            pad_token=__lowercase,
            mask_token=__lowercase,
            add_prefix_space=__lowercase,
            trim_offsets=__lowercase,
            **__lowercase,
        )

        # Rebuild the backend pre-tokenizer if `add_prefix_space` disagrees
        # with the serialized state.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", __lowercase) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type"))
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase)

        lowercase__ = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase)
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"])
            if "cls" in state:
                lowercase__ = tuple(state["cls"])

            lowercase__ = False

            if state.get("add_prefix_space", __lowercase) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True

            if state.get("trim_offsets", __lowercase) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True

            if changes_to_apply:
                # Re-instantiate the post-processor with the corrected state.
                lowercase__ = getattr(__lowercase, state.pop("type"))
                lowercase__ = component_class(**__lowercase)
                setattr(self.backend_tokenizer, __lowercase, __lowercase)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__(self : str):
        # mask_token getter: returns None (with a log) until the token is set.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def A__(self : Optional[int], __lowercase : Dict):
        # mask_token setter: wrap plain strings in AddedToken.
        # NOTE(review): `@mask_token.setter` cannot resolve — the property
        # above was bound as `A__`, not `mask_token`; the body also reads
        # `value`, which is not a parameter here.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase) if isinstance(__lowercase, __lowercase) else value
        lowercase__ = value

    def A__(self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any]):
        # _batch_encode_plus: pre-tokenized input requires add_prefix_space.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*__lowercase, **__lowercase)

    def A__(self : int, *__lowercase : Union[str, Any], **__lowercase : List[str]):
        # _encode_plus: same pre-tokenized-input guard as above.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*__lowercase, **__lowercase)

    def A__(self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None):
        # save_vocabulary: delegate to the backend model's save.
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase)
        return tuple(__lowercase)

    def A__(self : List[str], __lowercase : int, __lowercase : Optional[int]=None):
        # build_inputs_with_special_tokens: <s> A </s> (</s> B </s>).
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__(self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None):
        # create_token_type_ids_from_sequences: LED uses all-zero segment ids.
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    def A__(
        self : Union[str, Any],
        __lowercase : Union[Dict[str, EncodedInput], BatchEncoding],
        __lowercase : Optional[int] = None,
        __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        __lowercase : Optional[int] = None,
        __lowercase : Optional[bool] = None,
    ):
        # _pad: pad as usual, then extend `global_attention_mask` with -1
        # (local attention) so it matches the padded sequence length.
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase,
            max_length=__lowercase,
            padding_strategy=__lowercase,
            pad_to_multiple_of=__lowercase,
            return_attention_mask=__lowercase,
        )

        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"]) != len(__lowercase)

            if needs_to_be_padded:
                lowercase__ = len(__lowercase) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
37
1
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_=""):
    # Return a unique path inside a fresh temp directory, with the given
    # filename suffix.
    # NOTE(review): broken as written — `uuid.uuida()` does not exist
    # (presumably `uuid.uuid4()`), `suffix` is undefined (the parameter was
    # renamed to SCREAMING_SNAKE_CASE_), and the join uses the suffix argument
    # where the mkdtemp result was intended.
    lowercase__ = tempfile.mkdtemp()
    return os.path.join(SCREAMING_SNAKE_CASE_, str(uuid.uuida()) + suffix)


# NOTE(review): the three test classes below all share the name `_snake_case`,
# so each later definition shadows the previous one at module scope, and every
# test method is named `A__` (only the last per class survives).  Assignment
# targets were collapsed to `lowercase__` while reads use the original names
# (`agent_type`, `path`, `image`, ...).  Code kept byte-identical.


@require_soundfile
@require_torch
class _snake_case(unittest.TestCase):
    """AgentAudio: tensor round-trips through a serialized audio file."""

    def A__(self : List[str]):
        # Build a random waveform, wrap it, and check raw/string round-trips.
        # NOTE(review): `torch.floataa` is not a torch dtype (presumably
        # torch.float32).
        lowercase__ = torch.rand(12, dtype=torch.floataa) - 0.5
        lowercase__ = AgentAudio(__lowercase)
        lowercase__ = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(__lowercase, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(__lowercase))

        # Ensure that the file contains the same value as the original tensor
        lowercase__ , lowercase__ = sf.read(__lowercase)
        self.assertTrue(torch.allclose(__lowercase, torch.tensor(__lowercase), atol=1e-4))

    def A__(self : str):
        # Start from a file on disk instead of a tensor.
        # NOTE(review): `get_new_path` is undefined — the helper above was
        # renamed to `__lowerCAmelCase`.
        lowercase__ = torch.rand(12, dtype=torch.floataa) - 0.5
        lowercase__ = get_new_path(suffix=".wav")
        sf.write(__lowercase, __lowercase, 1_6000)
        lowercase__ = AgentAudio(__lowercase)

        self.assertTrue(torch.allclose(__lowercase, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), __lowercase)


@require_vision
@require_torch
class _snake_case(unittest.TestCase):
    """AgentImage: tensor/PIL round-trips through a serialized image file."""

    def A__(self : Optional[int]):
        # From a random tensor.
        lowercase__ = torch.randint(0, 256, (64, 64, 3))
        lowercase__ = AgentImage(__lowercase)
        lowercase__ = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(__lowercase, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(__lowercase))

    def A__(self : Dict):
        # From a Path: to_string() should point back at the same file.
        lowercase__ = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        lowercase__ = Image.open(__lowercase)
        lowercase__ = AgentImage(__lowercase)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(__lowercase))

    def A__(self : Optional[int]):
        # From a PIL image: serialization writes a NEW file (samefile False).
        lowercase__ = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        lowercase__ = Image.open(__lowercase)
        lowercase__ = AgentImage(__lowercase)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(__lowercase))


class _snake_case(unittest.TestCase):
    """AgentText: the string IS both the raw and the serialized form."""

    def A__(self : Union[str, Any]):
        lowercase__ = "Hey!"
        lowercase__ = AgentText(__lowercase)

        self.assertEqual(__lowercase, agent_type.to_string())
        self.assertEqual(__lowercase, agent_type.to_raw())
        self.assertEqual(__lowercase, __lowercase)
37
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    """Entry point for the `accelerate` command-line interface.

    Builds the top-level parser, registers every subcommand, then dispatches
    to the handler the selected subcommand installed via ``set_defaults(func=...)``.
    (Fixes the original, whose assignment targets were mangled so `parser` and
    `args` were read but never bound, and whose ``__main__`` guard called an
    undefined ``main``.)
    """
    # allow_abbrev=False so abbreviated top-level flags are not silently
    # accepted and confused with subcommand options.
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    # No subcommand given: show help and exit non-zero.
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


# Backward-compatible alias for the previous (mangled) public name.
__lowerCAmelCase = main


if __name__ == "__main__":
    main()
37
1
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class _snake_case(TestCase):
    """Repository hygiene checks over the dataset scripts under ``./datasets``.

    Fixes the original mangled version: the base class was an undefined name
    (restored to ``unittest.TestCase``), the two helpers were both defined as
    ``A__`` while being called as ``self._no_encoding_on_file_open`` /
    ``self._no_print_statements`` (AttributeError), both test methods shared
    the name ``A__`` (the second shadowed the first), and the loop read an
    undefined ``__lowercase`` variable.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a regex match for an ``open(...)`` call in *filepath* that
        passes no binary mode and no ``encoding`` keyword, else ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            # Negative lookahead rejects lines that already mention a mode or
            # encoding; the lookbehind anchors on a bare `open(` call.
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return the first real ``print(`` call in *filepath* (ignoring ones
        inside comments, strings, and docstrings), else ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            real_prints = [match for match in matches if match is not None and match.group(1) is not None]
        return real_prints[0] if real_prints else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must pass an explicit encoding to open()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        """Dataset scripts must log via datasets.logging, never print()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
37
# NOTE(review): collapsed/minified test module for ``DonutImageProcessor``.
# The statement at the end of the first physical line below continues onto the
# next line, and identifiers are machine-mangled (``lowercase__``, ``A__``),
# so a behavior-preserving reformat is not safe from this view.  Only this
# comment is added; the code is left byte-for-byte untouched.
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ): lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {"height": 18, "width": 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def A__ ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None def A__ ( self : str ): lowercase__ = DonutImageProcessingTester(self ) @property def A__ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_resize" ) ) self.assertTrue(hasattr(__lowercase, "size" ) ) self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) ) self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) ) self.assertTrue(hasattr(__lowercase, "do_pad" ) ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "image_mean" ) ) self.assertTrue(hasattr(__lowercase, "image_std" ) ) def A__ ( self : str ): lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"height": 18, "width": 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) ) self.assertEqual(image_processor.size, {"height": 84, "width": 42} ) def A__ ( self : List[str] ): pass @is_flaky() def A__ ( self : Dict ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Optional[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Tuple ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
37
1
from __future__ import annotations def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ): if start is None: lowercase__ = 0 if end is None: lowercase__ = len(SCREAMING_SNAKE_CASE_ ) - 1 if start >= end: return lowercase__ = (start + end) // 2 slowsort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) slowsort(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ ) if sequence[end] < sequence[mid]: lowercase__ , lowercase__ = sequence[mid], sequence[end] slowsort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
37
import re
from pathlib import Path
from unittest import TestCase

import pytest


# NOTE(review): duplicate of the earlier code-quality checker in this file and
# machine-mangled the same way: the base class is the unbound name
# ``lowercase__`` (presumably ``TestCase`` -- confirm), all four methods share
# the name ``A__``, and the bodies read names (``regexp``, ``match``,
# ``matches``, ``dataset_paths``, ``dataset_files``,
# ``self._no_encoding_on_file_open``, ``self._no_print_statements``) that are
# never bound.  Flagged only; this edit adds documentation and changes no
# code token.
@pytest.mark.integration
class _snake_case ( lowercase__):
    # Search one source file for an ``open(...)`` call without an explicit
    # encoding (the lookahead skips binary / write modes).
    def A__ ( self : Optional[Any], __lowercase : str ):
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
            lowercase__ = input_file.read()
            lowercase__ = regexp.search(__lowercase )
            return match

    # Search one source file for a ``print(`` call outside comments/strings.
    def A__ ( self : str, __lowercase : str ):
        with open(__lowercase, encoding="utf-8" ) as input_file:
            lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
            lowercase__ = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowercase__ = regexp.finditer(__lowercase )
            lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None]
            return matches[0] if matches else None

    # Fail when any dataset script opens a file without utf-8 encoding.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__lowercase ) ):
                raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )

    # Fail when any dataset script contains a print statement.
    def A__ ( self : Union[str, Any] ):
        lowercase__ = Path("./datasets" )
        lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__lowercase ) ):
                raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
37
1
# NOTE(review): collapsed/minified benchmark-arguments dataclass from
# transformers.  The dataclass field list continues across the physical lines
# below (several F-string ``default=`` values are split mid-statement), so a
# behavior-preserving reformat is not safe from this view.  Only this comment
# is added; the code is left byte-for-byte untouched.
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ): return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE_ ) @dataclass class _snake_case : UpperCamelCase__ : List[str] =list_field( default=[] , metadata={ """help""": ( """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version""" """ of all available models""" ) } , ) UpperCamelCase__ : List[int] =list_field( default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""}) UpperCamelCase__ : List[int] =list_field( default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""}) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Use FP16 to accelerate inference."""}) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Benchmark training of model"""}) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Verbose memory tracing"""}) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={"""help""": """Whether to perform speed measurements. 
Speed measurements can be disabled via --no-speed."""} , ) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory""" } , ) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Trace memory line by line"""}) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Save result to a CSV file"""}) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Save all print statements in a log file"""}) UpperCamelCase__ : bool =field(default=lowercase__ , metadata={"""help""": """Whether to print environment information"""}) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": ( """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use""" """ multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled""" """ for debugging / testing and on TPU.""" ) } , ) UpperCamelCase__ : str =field( default=F"""inference_time_{round(time())}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , ) UpperCamelCase__ : str =field( default=F"""inference_memory_{round(time())}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , ) UpperCamelCase__ : str =field( default=F"""train_time_{round(time())}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , ) UpperCamelCase__ : str =field( default=F"""train_memory_{round(time())}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , ) UpperCamelCase__ : str =field( default=F"""env_info_{round(time())}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , ) UpperCamelCase__ : str =field( default=F"""log_{round(time())}.csv""" , metadata={"""help""": """Log 
filename used if print statements are saved in log."""} , ) UpperCamelCase__ : int =field(default=3 , metadata={"""help""": """Times an experiment will be run."""}) UpperCamelCase__ : bool =field( default=lowercase__ , metadata={ """help""": ( """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain""" """ model weights.""" ) } , ) def A__ ( self : Optional[int] ): warnings.warn( F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models.", __lowercase, ) def A__ ( self : Any ): return json.dumps(dataclasses.asdict(self ), indent=2 ) @property def A__ ( self : Dict ): if len(self.models ) <= 0: raise ValueError( "Please make sure you provide at least one model name / model identifier, *e.g.* `--models" " bert-base-cased` or `args.models = ['bert-base-cased']." ) return self.models @property def A__ ( self : Optional[Any] ): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU." ) return False else: return True
37
# NOTE(review): transformers-style lazy-import module for X-MOD, collapsed to
# one line.  Two apparent defects are flagged (not changed, since this edit is
# documentation only): (1) the model-class list is assigned to the same name
# ``lowercase_`` as the import-structure dict, clobbering it; (2) the final
# ``_LazyModule(...)`` call passes ``_import_structure``, a name never defined
# in this module, which would raise NameError on import.  Presumably both
# should reference one ``_import_structure`` dict -- confirm against
# transformers' lazy-module convention before fixing.
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
1
# NOTE(review): byte-identical duplicate of the X-MOD lazy-import module
# earlier in this file, with the same two apparent defects (flagged, not
# changed): the model-class list clobbers the ``lowercase_`` dict, and
# ``_LazyModule(...)`` receives the undefined name ``_import_structure``
# (NameError on import).  Presumably both should reference one
# ``_import_structure`` dict -- confirm before fixing.
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_xmod""": [ """XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XmodConfig""", """XmodOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""", """XmodForCausalLM""", """XmodForMaskedLM""", """XmodForMultipleChoice""", """XmodForQuestionAnswering""", """XmodForSequenceClassification""", """XmodForTokenClassification""", """XmodModel""", """XmodPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
# NOTE(review): collapsed/minified test module for ``Pix2StructImageProcessor``.
# Statements run across the physical line boundaries below and identifiers are
# machine-mangled (``lowercase__``, ``A__``), so a behavior-preserving
# reformat is not safe from this view.  Only this comment is added; the code
# is left byte-for-byte untouched.  It also imports ``requests`` and fetches a
# remote image in one helper -- network-dependent test data, worth confirming.
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ): lowercase__ = size if size is not None else {"height": 20, "width": 20} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = size lowercase__ = do_normalize lowercase__ = do_convert_rgb lowercase__ = [512, 1024, 2048, 4096] lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16} def A__ ( self : List[str] ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def A__ ( self : Any ): lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Any ): 
lowercase__ = PixaStructImageProcessingTester(self ) @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Optional[int] ): lowercase__ = self.image_processor_tester.prepare_dummy_image() lowercase__ = self.image_processing_class(**self.image_processor_dict ) lowercase__ = 2048 lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) ) def A__ ( self : Union[str, Any] ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : int ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = 
prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 lowercase__ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowercase ): lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches lowercase__ = "Hello" lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Tuple ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase 
).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Any ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Optional[int] ): lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 ) lowercase__ = 3 @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Dict ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Union[str, Any] ): # Initialize 
image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
37
1
# NOTE(review): collapsed UMT5 configuration module; the ``if`` condition at
# the end of the first physical line below continues onto the next, so a
# behavior-preserving reformat is not safe from this view.  Only this comment
# is added; the code is untouched.  Two things worth confirming against
# upstream transformers: (1) the activation-validation condition
# ``len(...) > 1 and act_info[0] != "gated" or len(...) > 2`` relies on
# ``and`` binding tighter than ``or`` -- presumably intentional but fragile;
# (2) the final ``if feed_forward_proj == "gated-gelu"`` rebinds the mangled
# ``lowercase__`` name rather than an attribute.
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class _snake_case ( lowercase__): UpperCamelCase__ : Tuple ="""umt5""" UpperCamelCase__ : Optional[int] =["""past_key_values"""] def __init__( self : Dict, __lowercase : str=25_0112, __lowercase : Tuple=512, __lowercase : str=64, __lowercase : List[Any]=1024, __lowercase : Dict=8, __lowercase : List[str]=None, __lowercase : List[str]=6, __lowercase : Dict=32, __lowercase : Optional[int]=128, __lowercase : int=0.1, __lowercase : Dict=1e-6, __lowercase : List[str]=1.0, __lowercase : int="gated-gelu", __lowercase : List[str]=True, __lowercase : Optional[int]=True, __lowercase : Optional[int]="T5Tokenizer", __lowercase : List[str]=True, __lowercase : Any=0, __lowercase : int=1, __lowercase : List[Any]=0, **__lowercase : List[Any], ): super().__init__( is_encoder_decoder=__lowercase, tokenizer_class=__lowercase, tie_word_embeddings=__lowercase, pad_token_id=__lowercase, eos_token_id=__lowercase, decoder_start_token_id=__lowercase, **__lowercase, ) lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_kv lowercase__ = d_ff lowercase__ = num_layers lowercase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ = num_heads lowercase__ = relative_attention_num_buckets lowercase__ = relative_attention_max_distance lowercase__ = dropout_rate lowercase__ = layer_norm_epsilon lowercase__ = initializer_factor lowercase__ = feed_forward_proj lowercase__ = use_cache lowercase__ = self.feed_forward_proj.split("-" ) lowercase__ = act_info[-1] lowercase__ = act_info[0] == "gated" if len(__lowercase ) > 1 and act_info[0] != "gated" or 
len(__lowercase ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": lowercase__ = "gelu_new" @property def A__ ( self : int ): return self.d_model @property def A__ ( self : int ): return self.num_heads @property def A__ ( self : str ): return self.num_layers class _snake_case ( lowercase__): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def A__ ( self : Dict ): lowercase__ = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: lowercase__ = "past_encoder_sequence + sequence" lowercase__ = {0: "batch"} lowercase__ = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowercase__ = {0: "batch", 1: "decoder_sequence"} lowercase__ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__lowercase, direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def A__ ( self : Optional[Any] ): return 13 @property def A__ ( self : Optional[int] ): return 5e-4
37
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ , lowercase__ = len(SCREAMING_SNAKE_CASE_ ), len(grid[0] ) if ( min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) lowercase__ = 0 count += depth_first_search(SCREAMING_SNAKE_CASE_ , row + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , row - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col + 1 , SCREAMING_SNAKE_CASE_ ) count += depth_first_search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , col - 1 , SCREAMING_SNAKE_CASE_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
37
1
from ..utils import DummyObject, requires_backends


# Placeholder object emitted when the "transformers" / "torch" / "note_seq"
# backends are not installed: any attempt to instantiate it or call one of
# its classmethods raises a helpful ImportError via ``requires_backends``
# instead of an opaque ModuleNotFoundError at import time.
# NOTE(review): the metaclass is the mangled, unbound name ``lowercase__`` --
# presumably ``DummyObject`` (which is imported above); confirm before use.
class _snake_case ( metaclass=lowercase__):
    # Backends that must be importable before this object can be used.
    UpperCamelCase__ : Union[str, Any] =["""transformers""", """torch""", """note_seq"""]

    def __init__( self : List[str], *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        requires_backends(self, ["transformers", "torch", "note_seq"] )

    @classmethod
    def A__ ( cls : Optional[int], *__lowercase : List[Any], **__lowercase : Tuple ):
        requires_backends(cls, ["transformers", "torch", "note_seq"] )

    # NOTE(review): same name as the previous classmethod -- this definition
    # shadows it; kept as-is because this edit only documents.
    @classmethod
    def A__ ( cls : str, *__lowercase : str, **__lowercase : Any ):
        requires_backends(cls, ["transformers", "torch", "note_seq"] )
37
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = 0 for ch in input_str: lowercase__ = ord(SCREAMING_SNAKE_CASE_ ) lowercase__ = pow(2 , SCREAMING_SNAKE_CASE_ ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
37
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = 0 for ch in input_str: lowercase__ = ord(SCREAMING_SNAKE_CASE_ ) lowercase__ = pow(2 , SCREAMING_SNAKE_CASE_ ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
37
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): lowercase__ = len(SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ): if numbers[j] < numbers[i]: lowercase__ , lowercase__ = numbers[j], numbers[i] return numbers if __name__ == "__main__": lowercase_ = input("""Enter numbers separated by a comma:\n""").strip() lowercase_ = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
37
1
# NOTE(review): identifiers in this module were mangled by an automated
# rewrite — locals collapsed to ``lowercase__``, parameter lists repeat
# ``__lowercase`` (a SyntaxError), and many names the bodies read
# (``latents``, ``text_inputs``, ``prompt_embeds``, ...) are never bound.
# Code is kept token-for-token; only comments/formatting were added.
# TODO: restore the original identifiers before this module can run.
from typing import List, Optional, Union

import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .text_encoder import MultilingualCLIP

# Module logger (garbled name — presumably ``logger``; later code reads ``logger``).
lowercase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Usage example spliced into the pipeline ``__call__`` docstring.
lowercase_ = """
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
        >>> pipe_prior.to(\"cuda\")

        >>> prompt = \"red cat, 4k photo\"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
        >>> pipe.to(\"cuda\")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save(\"cat.png\")
        ```
"""


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=8):
    """Round a (height, width) pair up to the next multiple of the scale factor.

    NOTE(review): parameters are garbled duplicates; the body reads ``h``,
    ``w`` and ``scale_factor`` — confirm against the upstream helper.
    """
    lowercase__ = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    lowercase__ = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class _snake_case(lowercase__):
    """Text-to-image diffusion pipeline: multilingual text encoder + tokenizer
    + conditional UNet + DDIM/DDPM scheduler + movq image decoder.

    NOTE(review): the base class ``lowercase__`` is undefined here —
    presumably ``DiffusionPipeline`` (imported above); verify.
    """

    def __init__(
        self: Tuple,
        __lowercase: MultilingualCLIP,
        __lowercase: XLMRobertaTokenizer,
        __lowercase: UNetaDConditionModel,
        __lowercase: Union[DDIMScheduler, DDPMScheduler],
        __lowercase: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=__lowercase,
            tokenizer=__lowercase,
            unet=__lowercase,
            scheduler=__lowercase,
            movq=__lowercase,
        )
        # Spatial downscale factor implied by the movq block structure
        # (garbled target — presumably ``self.movq_scale_factor``, which
        # ``__call__`` reads below).
        lowercase__ = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def A__(self: Optional[Any], __lowercase: List[str], __lowercase: Tuple, __lowercase: Dict, __lowercase: Dict, __lowercase: Any, __lowercase: List[Any]):
        # prepare_latents: draw fresh noise, or validate and move user-supplied
        # latents, then scale by the scheduler's initial noise sigma.
        if latents is None:
            lowercase__ = randn_tensor(__lowercase, generator=__lowercase, device=__lowercase, dtype=__lowercase)
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            lowercase__ = latents.to(__lowercase)
        lowercase__ = latents * scheduler.init_noise_sigma
        return latents

    def A__(
        self: Tuple,
        __lowercase: List[Any],
        __lowercase: str,
        __lowercase: int,
        __lowercase: List[str],
        __lowercase: Union[str, Any] = None,
    ):
        # _encode_prompt: tokenize + encode the prompt (and, for classifier-free
        # guidance, the negative prompt) and tile per requested image count.
        lowercase__ = len(__lowercase) if isinstance(__lowercase, __lowercase) else 1
        # get prompt text embeddings
        lowercase__ = self.tokenizer(
            __lowercase,
            padding="max_length",
            truncation=__lowercase,
            max_length=77,
            return_attention_mask=__lowercase,
            add_special_tokens=__lowercase,
            return_tensors="pt",
        )
        lowercase__ = text_inputs.input_ids
        lowercase__ = self.tokenizer(__lowercase, padding="longest", return_tensors="pt").input_ids
        # Warn when the prompt exceeds the model's maximum token length.
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__lowercase, __lowercase):
            lowercase__ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}'''
            )
        lowercase__ = text_input_ids.to(__lowercase)
        lowercase__ = text_inputs.attention_mask.to(__lowercase)
        lowercase__ , lowercase__ = self.text_encoder(input_ids=__lowercase, attention_mask=__lowercase)
        lowercase__ = prompt_embeds.repeat_interleave(__lowercase, dim=0)
        lowercase__ = text_encoder_hidden_states.repeat_interleave(__lowercase, dim=0)
        lowercase__ = text_mask.repeat_interleave(__lowercase, dim=0)
        if do_classifier_free_guidance:
            lowercase__ = 42
            if negative_prompt is None:
                lowercase__ = [""] * batch_size
            elif type(__lowercase) is not type(__lowercase):
                raise TypeError(
                    F'''`negative_prompt` should be the same type to `prompt`, but got {type(__lowercase)} !='''
                    F''' {type(__lowercase)}.'''
                )
            elif isinstance(__lowercase, __lowercase):
                lowercase__ = [negative_prompt]
            elif batch_size != len(__lowercase):
                raise ValueError(
                    F'''`negative_prompt`: {negative_prompt} has batch size {len(__lowercase)}, but `prompt`:'''
                    F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    " the batch size of `prompt`."
                )
            else:
                lowercase__ = negative_prompt
            lowercase__ = self.tokenizer(
                __lowercase,
                padding="max_length",
                max_length=77,
                truncation=__lowercase,
                return_attention_mask=__lowercase,
                add_special_tokens=__lowercase,
                return_tensors="pt",
            )
            lowercase__ = uncond_input.input_ids.to(__lowercase)
            lowercase__ = uncond_input.attention_mask.to(__lowercase)
            lowercase__ , lowercase__ = self.text_encoder(input_ids=__lowercase, attention_mask=__lowercase)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase__ = negative_prompt_embeds.shape[1]
            lowercase__ = negative_prompt_embeds.repeat(1, __lowercase)
            lowercase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt, __lowercase)
            lowercase__ = uncond_text_encoder_hidden_states.shape[1]
            lowercase__ = uncond_text_encoder_hidden_states.repeat(1, __lowercase, 1)
            lowercase__ = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, __lowercase, -1
            )
            lowercase__ = uncond_text_mask.repeat_interleave(__lowercase, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase__ = torch.cat([negative_prompt_embeds, prompt_embeds])
            lowercase__ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            lowercase__ = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask

    def A__(self: Optional[int], __lowercase: str = 0):
        # Sequential CPU offload of the heavy sub-models via accelerate.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        lowercase__ = torch.device(F'''cuda:{gpu_id}''')
        lowercase__ = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__lowercase, __lowercase)

    def A__(self: str, __lowercase: Union[str, Any] = 0):
        # Hook-based model CPU offload (requires accelerate >= 0.17.0.dev0).
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        lowercase__ = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=__lowercase)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        lowercase__ = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowercase__ , lowercase__ = cpu_offload_with_hook(__lowercase, __lowercase, prev_module_hook=__lowercase)
        if self.safety_checker is not None:
            lowercase__ , lowercase__ = cpu_offload_with_hook(self.safety_checker, __lowercase, prev_module_hook=__lowercase)
        # We'll offload the last model manually.
        lowercase__ = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def A__(self: List[Any]):
        # Device the UNet actually executes on (may differ under offload hooks).
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__lowercase, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(__lowercase)
    def __call__(
        self: Dict,
        __lowercase: Union[str, List[str]],
        __lowercase: Union[torch.FloatTensor, List[torch.FloatTensor]],
        __lowercase: Union[torch.FloatTensor, List[torch.FloatTensor]],
        __lowercase: Optional[Union[str, List[str]]] = None,
        __lowercase: int = 512,
        __lowercase: int = 512,
        __lowercase: int = 100,
        __lowercase: float = 4.0,
        __lowercase: int = 1,
        __lowercase: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        __lowercase: Optional[torch.FloatTensor] = None,
        __lowercase: Optional[str] = "pil",
        __lowercase: bool = True,
    ):
        # Full generation loop: encode prompt -> prepare latents -> denoise ->
        # decode with movq -> post-process to pt/np/pil.
        if isinstance(__lowercase, __lowercase):
            lowercase__ = 1
        elif isinstance(__lowercase, __lowercase):
            lowercase__ = len(__lowercase)
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__lowercase)}''')
        lowercase__ = self._execution_device
        lowercase__ = batch_size * num_images_per_prompt
        lowercase__ = guidance_scale > 1.0
        lowercase__ , lowercase__ , lowercase__ = self._encode_prompt(
            __lowercase, __lowercase, __lowercase, __lowercase, __lowercase
        )
        if isinstance(__lowercase, __lowercase):
            lowercase__ = torch.cat(__lowercase, dim=0)
        if isinstance(__lowercase, __lowercase):
            lowercase__ = torch.cat(__lowercase, dim=0)
        if do_classifier_free_guidance:
            lowercase__ = image_embeds.repeat_interleave(__lowercase, dim=0)
            lowercase__ = negative_image_embeds.repeat_interleave(__lowercase, dim=0)
            lowercase__ = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=__lowercase
            )
        self.scheduler.set_timesteps(__lowercase, device=__lowercase)
        lowercase__ = self.scheduler.timesteps
        lowercase__ = self.unet.config.in_channels
        lowercase__ , lowercase__ = get_new_h_w(__lowercase, __lowercase, self.movq_scale_factor)
        # create initial latent
        lowercase__ = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            __lowercase,
            __lowercase,
            __lowercase,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(__lowercase)):
            # expand the latents if we are doing classifier free guidance
            lowercase__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            lowercase__ = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            lowercase__ = self.unet(
                sample=__lowercase,
                timestep=__lowercase,
                encoder_hidden_states=__lowercase,
                added_cond_kwargs=__lowercase,
                return_dict=__lowercase,
            )[0]
            if do_classifier_free_guidance:
                lowercase__ , lowercase__ = noise_pred.split(latents.shape[1], dim=1)
                lowercase__ , lowercase__ = noise_pred.chunk(2)
                lowercase__ , lowercase__ = variance_pred.chunk(2)
                lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase__ = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase__ , lowercase__ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            lowercase__ = self.scheduler.step(
                __lowercase,
                __lowercase,
                __lowercase,
                generator=__lowercase,
            ).prev_sample
        # post-processing
        lowercase__ = self.movq.decode(__lowercase, force_not_quantize=__lowercase)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            # Map latent decoder output from [-1, 1] into [0, 1].
            lowercase__ = image * 0.5 + 0.5
            lowercase__ = image.clamp(0, 1)
            lowercase__ = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            lowercase__ = self.numpy_to_pil(__lowercase)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__lowercase)
37
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) lowercase__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowercase__ = 1 if upper_limit > 0: lowercase__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowercase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
37
1
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fixed: "bsa" is not a real package; bs4 intended
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Search Google for the CLI arguments and open the top result links.
    # (Fixed: garbled locals — ``url``/``res``/``soup``/``links`` were read
    # but every assignment targeted ``lowercase_``.)
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
37
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public symbols it exports.
# (Fixed: all of these assignments were garbled to the single name
# ``lowercase_`` while the ``_LazyModule`` call below read the never-defined
# ``_import_structure`` — a NameError at import time.)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoints mapped to their hosted config files.
# (Fixed: both this and the logger above were garbled to the same name
# ``lowercase_``, so the second assignment clobbered the first.)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class _snake_case(PretrainedConfig):
    """Configuration for Falcon models.

    (Fixed: the collapsed original declared 17 ``__init__`` parameters all
    named ``__lowercase`` — a SyntaxError — derived from the undefined base
    class ``lowercase__``, and bound every attribute to ``lowercase__``.
    Names restored from the values the body reads; defaults preserved.)
    """

    model_type = "falcon"
    # Cache entries are inputs, not outputs, during generation.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Key/value heads default to full multi-head attention.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        # Per-head dimensionality of the attention projections.
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        # Rotary position embeddings are used exactly when ALiBi is off.
        return not self.alibi
37
# Convert a PyTorch BERT checkpoint to a TF1-style checkpoint.
# NOTE(review): local names in this script were mangled to ``lowercase__`` /
# ``SCREAMING_SNAKE_CASE_`` (including duplicated parameter names, a
# SyntaxError), while the bodies read the real names (``state_dict``,
# ``tensors_to_transpose``, ``torch_tensor``, ``saver`` ...).  Code is kept
# token-for-token; only comments/formatting were added.  TODO: restore
# identifiers before running.
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
    """Export ``model``'s state dict as TF variables and save a ``.ckpt``.

    NOTE(review): presumably (model, ckpt_dir, model_name) — the final caller
    below passes exactly those keywords; confirm.
    """
    # Weights stored transposed in PyTorch relative to TF dense kernels.
    lowercase__ = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    # (pytorch substring, tf substring) rename table applied in order.
    lowercase__ = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(SCREAMING_SNAKE_CASE_):
        os.makedirs(SCREAMING_SNAKE_CASE_)
    lowercase__ = model.state_dict()

    def to_tf_var_name(SCREAMING_SNAKE_CASE_):
        # Map a PyTorch state-dict key onto the TF variable naming scheme.
        for patt, repl in iter(SCREAMING_SNAKE_CASE_):
            lowercase__ = name.replace(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
        return f'''bert/{name}'''

    def create_tf_var(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
        # Allocate a zero-initialized TF variable matching the tensor's
        # dtype/shape inside the active session.
        lowercase__ = tf.dtypes.as_dtype(tensor.dtype)
        lowercase__ = tf.get_variable(dtype=SCREAMING_SNAKE_CASE_, shape=tensor.shape, name=SCREAMING_SNAKE_CASE_, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(SCREAMING_SNAKE_CASE_)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            lowercase__ = to_tf_var_name(SCREAMING_SNAKE_CASE_)
            lowercase__ = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                lowercase__ = torch_tensor.T
            lowercase__ = create_tf_var(tensor=SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_, session=SCREAMING_SNAKE_CASE_)
            tf.keras.backend.set_value(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
            lowercase__ = session.run(SCREAMING_SNAKE_CASE_)
            # Sanity check: TF round-trip matches the source tensor.
            print(f'''Successfully created {tf_name}: {np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)}''')
        lowercase__ = tf.train.Saver(tf.trainable_variables())
        saver.save(SCREAMING_SNAKE_CASE_, os.path.join(SCREAMING_SNAKE_CASE_, model_name.replace("-", "_") + ".ckpt"))


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_=None):
    """CLI entry point: parse args, load the PyTorch model, run the export.

    NOTE(review): calls ``convert_pytorch_checkpoint_to_tf`` — the original,
    pre-mangling name of the function above; undefined as written.
    """
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=SCREAMING_SNAKE_CASE_, required=SCREAMING_SNAKE_CASE_, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=SCREAMING_SNAKE_CASE_, default=SCREAMING_SNAKE_CASE_, required=SCREAMING_SNAKE_CASE_, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=SCREAMING_SNAKE_CASE_, required=SCREAMING_SNAKE_CASE_, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=SCREAMING_SNAKE_CASE_, required=SCREAMING_SNAKE_CASE_, help="Directory in which to save tensorflow model")
    lowercase__ = parser.parse_args(SCREAMING_SNAKE_CASE_)
    lowercase__ = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=SCREAMING_SNAKE_CASE_, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
37
1
# Generate README.md model cards for the four facebook/wmt19 FSMT pairs.
# NOTE(review): local names were mangled to ``lowercase__``/``lowercase_`` /
# ``SCREAMING_SNAKE_CASE_`` (duplicated parameters are a SyntaxError) while
# the bodies read the real names (``texts``, ``scores``, ``pair``, ``path``,
# ``repo_dir``, ``model_cards_dir``, ``src_lang``, ``tgt_lang``) and the
# final loop calls the pre-mangling name ``write_model_card``.  Code kept
# token-for-token; only comments/formatting added.  The embedded f-string is
# program output and is preserved verbatim.
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def __lowerCAmelCase(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_):
    """Write the README.md model card for one (src_lang, tgt_lang) pair."""
    # Per-language demo sentences interpolated into the card's usage example.
    lowercase__ = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    lowercase__ = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    lowercase__ = f'''{src_lang}-{tgt_lang}'''

    lowercase__ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

'''
    os.makedirs(SCREAMING_SNAKE_CASE_, exist_ok=SCREAMING_SNAKE_CASE_)
    lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_, "README.md")
    print(f'''Generating {path}''')
    with open(SCREAMING_SNAKE_CASE_, "w", encoding="utf-8") as f:
        f.write(SCREAMING_SNAKE_CASE_)


# make sure we are under the root of the project
lowercase_ = Path(__file__).resolve().parent.parent.parent
lowercase_ = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""")
    lowercase_ = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public symbols it exports.
# (Fixed: these assignments were garbled to ``lowercase_`` while the
# ``_LazyModule`` call read the never-defined ``_import_structure``.)
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): if upper_limit < 0: raise ValueError("Limit for the Catalan sequence must be ≥ 0" ) lowercase__ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowercase__ = 1 if upper_limit > 0: lowercase__ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(SCREAMING_SNAKE_CASE_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowercase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(F'The Catalan numbers from 0 through {N} are:') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
37
# Script: convert PyTorch transformer checkpoints to TensorFlow 2 models and
# (optionally) compare the two models' outputs before saving TF weights.
#
# NOTE(review): identifiers in this file appear machine-mangled. All local
# assignments collapse to `lowercase__` (each clobbers the last), all
# parameters are named `SCREAMING_SNAKE_CASE_` (duplicate parameter names are a
# SyntaxError in Python), and the function bodies reference descriptive names
# (`model_type`, `config_file`, `tf_model`, ...) that the mangling destroyed.
# The code is preserved byte-for-byte below; only comments were added.
import argparse
import os

from . import (
    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig,
    DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig,
    OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration,
    TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering,
    TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM,
    TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader,
    TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM,
    TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM,
    TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration,
    TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel,
    TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig,
    is_torch_available, load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging


# Torch-side model classes are only needed when comparing outputs; import lazily.
if is_torch_available():
    import numpy as np
    import torch

    from . import (
        AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering,
        BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM,
        DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining,
        FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining,
        LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification,
        TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel,
        XLNetLMHeadModel,
    )


logging.set_verbosity_info()

# Registry mapping model-type key -> (config class, TF class(es), PT class(es),
# pretrained archive map/list). NOTE(review): entry lengths vary (e.g. "dpr"
# has 10 elements), yet the unpacking below assumes a fixed arity — presumably
# broken by the mangling; verify against upstream before relying on it.
lowercase_ = {
    """bart""": (BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST,),
    """bert""": (BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """bert-large-uncased-whole-word-masking-finetuned-squad""": (BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """bert-large-cased-whole-word-masking-finetuned-squad""": (BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """bert-base-cased-finetuned-mrpc""": (BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """dpr""": (DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,),
    """gpt2""": (GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """xlnet""": (XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """xlm""": (XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """xlm-roberta""": (XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """transfo-xl""": (TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """openai-gpt""": (OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """roberta""": (RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """layoutlm""": (LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,),
    """roberta-large-mnli""": (RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """camembert""": (CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """flaubert""": (FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """distilbert""": (DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """distilbert-base-distilled-squad""": (DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """lxmert""": (LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """lxmert-visual-feature-encoder""": (LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """ctrl""": (CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """albert""": (AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """t5""": (TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """electra""": (ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,),
    """wav2vec2""": (WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,),
}


# Convert a single PyTorch checkpoint to a TF2 model, optionally comparing the
# two models' outputs, then save TF weights in h5 format.
# NOTE(review): parameters below were presumably (model_type,
# pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model,
# use_cached_models) before mangling; the duplicate parameter names are a
# SyntaxError as written.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
    lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
    # Initialise TF model: resolve the config (downloading if it's a shortcut name)
    if config_file in aws_config_map:
        lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
    lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE_ )
    lowercase__ = True
    lowercase__ = True
    print(f'''Building TensorFlow model from configuration: {config}''' )
    lowercase__ = model_class(SCREAMING_SNAKE_CASE_ )
    # Load weights from tf checkpoint (resolve shortcut name to a local file first)
    if pytorch_checkpoint_path in aws_config_map.keys():
        lowercase__ = cached_file( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    if compare_with_pt_model:
        # Run both models on dummy inputs and compare the max absolute difference.
        lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ )  # build the network
        lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
        lowercase__ = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ )
        with torch.no_grad():
            lowercase__ = pt_model(**pt_model.dummy_inputs )
        lowercase__ = pto[0].numpy()
        lowercase__ = tfo[0].numpy()
        lowercase__ = np.amax(np.abs(np_pt - np_tf ) )
        print(f'''Max absolute difference between models outputs {diff}''' )
        # 2e-2 tolerance between PT and TF outputs
        assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}'''
    # Save pytorch-model
    print(f'''Save TensorFlow model to {tf_dump_path}''' )
    tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format="h5" )


# Batch driver: iterate over all (or selected) model types and shortcut names,
# resolving cached files and delegating each conversion to the function above.
# NOTE(review): same mangling caveats as above; `convert_pt_checkpoint_to_tf`
# and `aws_model_maps` are referenced but not defined under those names here.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ):
    if args_model_type is None:
        lowercase__ = list(MODEL_CLASSES.keys() )
    else:
        lowercase__ = [args_model_type]
    for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ):
        print("=" * 100 )
        print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}''' )
        print("=" * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            lowercase__ = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            lowercase__ = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ):
            print("-" * 100 )
            # Skip (or exclusively keep) finetuned checkpoints depending on the flag.
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
                    continue
                lowercase__ = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
                continue
            print(
                f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}''' )
            print("-" * 100 )
            # Resolve shortcut names to concrete local files via the HF cache.
            if config_shortcut_name in aws_config_map:
                lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
            else:
                lowercase__ = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models )
            else:
                lowercase__ = model_shortcut_name
            if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
                lowercase__ = "converted_model"
            convert_pt_checkpoint_to_tf(
                model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ ,
                config_file=SCREAMING_SNAKE_CASE_ ,
                tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + "-tf_model.h5" ) ,
                compare_with_pt_model=SCREAMING_SNAKE_CASE_ , )
            # Optionally delete the cached PT files to bound disk usage in batch runs.
            if remove_cached_files:
                os.remove(SCREAMING_SNAKE_CASE_ )
                os.remove(SCREAMING_SNAKE_CASE_ )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" )
    parser.add_argument(
        """--model_type""", default=None, type=str,
        help=( F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' """convert all the models from AWS.""" ), )
    parser.add_argument(
        """--pytorch_checkpoint_path""", default=None, type=str,
        help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), )
    parser.add_argument(
        """--config_file""", default=None, type=str,
        help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), )
    parser.add_argument(
        """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" )
    parser.add_argument(
        """--use_cached_models""", action="""store_true""",
        help="""Use cached models if possible instead of updating to latest checkpoint versions.""", )
    parser.add_argument(
        """--remove_cached_files""", action="""store_true""",
        help="""Remove pytorch models after conversion (save memory when converting in batches).""", )
    parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
    lowercase_ = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
37
1
# Tests for the Trainer callback mechanism: a recording callback collects the
# names of the callback events it receives, and the test case compares that
# trace to the expected event sequence for various logging/saving/evaluation
# configurations.
#
# NOTE(review): identifiers are machine-mangled — methods are all named `A__`
# (so on a real class only the last definition would survive), the base class
# name `lowercase__` is undefined here (presumably TrainerCallback), and local
# assignments collapse to `lowercase__`. Code preserved byte-for-byte; only
# comments were added.
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback,
    TrainingArguments, is_torch_available, )
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


# Recording callback: appends the name of every callback event it receives to
# `self.events` so tests can assert on the exact sequence.
class _snake_case ( lowercase__):
    def __init__( self : List[Any] ):
        lowercase__ = []  # NOTE(review): presumably `self.events = []` before mangling

    def A__ ( self : Optional[int], __lowercase : Any, __lowercase : int, __lowercase : str, **__lowercase : Optional[Any] ):
        self.events.append("on_init_end" )

    def A__ ( self : str, __lowercase : Tuple, __lowercase : Any, __lowercase : Optional[Any], **__lowercase : List[str] ):
        self.events.append("on_train_begin" )

    def A__ ( self : Dict, __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : List[Any], **__lowercase : List[Any] ):
        self.events.append("on_train_end" )

    def A__ ( self : Optional[int], __lowercase : Optional[int], __lowercase : int, __lowercase : Union[str, Any], **__lowercase : Optional[Any] ):
        self.events.append("on_epoch_begin" )

    def A__ ( self : Optional[int], __lowercase : Dict, __lowercase : str, __lowercase : List[str], **__lowercase : Tuple ):
        self.events.append("on_epoch_end" )

    def A__ ( self : Tuple, __lowercase : Tuple, __lowercase : str, __lowercase : List[Any], **__lowercase : Any ):
        self.events.append("on_step_begin" )

    def A__ ( self : Tuple, __lowercase : List[str], __lowercase : Dict, __lowercase : List[Any], **__lowercase : Optional[int] ):
        self.events.append("on_step_end" )

    def A__ ( self : List[Any], __lowercase : Optional[Any], __lowercase : Union[str, Any], __lowercase : Optional[Any], **__lowercase : Optional[Any] ):
        self.events.append("on_evaluate" )

    def A__ ( self : Any, __lowercase : Union[str, Any], __lowercase : Optional[int], __lowercase : str, **__lowercase : List[Any] ):
        self.events.append("on_predict" )

    def A__ ( self : Union[str, Any], __lowercase : Union[str, Any], __lowercase : List[str], __lowercase : List[str], **__lowercase : Tuple ):
        self.events.append("on_save" )

    def A__ ( self : int, __lowercase : Dict, __lowercase : Tuple, __lowercase : Union[str, Any], **__lowercase : Optional[Any] ):
        self.events.append("on_log" )

    def A__ ( self : Dict, __lowercase : List[Any], __lowercase : Union[str, Any], __lowercase : Optional[Any], **__lowercase : Optional[Any] ):
        self.events.append("on_prediction_step" )


@require_torch
class _snake_case ( unittest.TestCase):
    # setUp: create a throwaway output directory for each test run.
    def A__ ( self : str ):
        lowercase__ = tempfile.mkdtemp()

    # tearDown: remove the temp output directory.
    def A__ ( self : Optional[Any] ):
        shutil.rmtree(self.output_dir )

    # Build a small regression Trainer with the given dataset sizes / callbacks.
    def A__ ( self : Optional[int], __lowercase : int=0, __lowercase : Optional[Any]=0, __lowercase : Union[str, Any]=64, __lowercase : int=64, __lowercase : str=None, __lowercase : Union[str, Any]=False, **__lowercase : Tuple ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        lowercase__ = RegressionDataset(length=__lowercase )
        lowercase__ = RegressionDataset(length=__lowercase )
        lowercase__ = RegressionModelConfig(a=__lowercase, b=__lowercase )
        lowercase__ = RegressionPreTrainedModel(__lowercase )
        lowercase__ = TrainingArguments(self.output_dir, disable_tqdm=__lowercase, report_to=[], **__lowercase )
        return Trainer(
            __lowercase, __lowercase, train_dataset=__lowercase, eval_dataset=__lowercase, callbacks=__lowercase, )

    # Assert two callback lists are equal, comparing classes to instances as
    # needed and ignoring order.
    def A__ ( self : Any, __lowercase : str, __lowercase : Any ):
        self.assertEqual(len(__lowercase ), len(__lowercase ) )
        # Order doesn't matter
        lowercase__ = sorted(__lowercase, key=lambda __lowercase : cb.__name__ if isinstance(__lowercase, __lowercase ) else cb.__class__.__name__ )
        lowercase__ = sorted(__lowercase, key=lambda __lowercase : cb.__name__ if isinstance(__lowercase, __lowercase ) else cb.__class__.__name__ )
        for cba, cba in zip(__lowercase, __lowercase ):
            if isinstance(__lowercase, __lowercase ) and isinstance(__lowercase, __lowercase ):
                self.assertEqual(__lowercase, __lowercase )
            elif isinstance(__lowercase, __lowercase ) and not isinstance(__lowercase, __lowercase ):
                self.assertEqual(__lowercase, cba.__class__ )
            elif not isinstance(__lowercase, __lowercase ) and isinstance(__lowercase, __lowercase ):
                self.assertEqual(cba.__class__, __lowercase )
            else:
                self.assertEqual(__lowercase, __lowercase )

    # Compute the event sequence a full `trainer.train()` run should produce
    # given the trainer's logging/eval/save configuration.
    def A__ ( self : str, __lowercase : Dict ):
        lowercase__ = ["on_init_end", "on_train_begin"]
        lowercase__ = 0
        lowercase__ = len(trainer.get_eval_dataloader() )
        lowercase__ = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("on_epoch_begin" )
            for _ in range(__lowercase ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log" )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save" )
            expected_events.append("on_epoch_end" )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    # Default callbacks are installed at init; disable_tqdm swaps Progress for Printer.
    def A__ ( self : str ):
        lowercase__ = self.get_trainer()
        lowercase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        # Callbacks passed at init are added to the default callbacks
        lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(__lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        lowercase__ = self.get_trainer(disable_tqdm=__lowercase )
        lowercase__ = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )

    # Callbacks can be added/popped/removed by class or by instance.
    def A__ ( self : Dict ):
        lowercase__ = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        lowercase__ = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(__lowercase )
        expected_callbacks.remove(__lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        lowercase__ = self.get_trainer()
        lowercase__ = trainer.pop_callback(__lowercase )
        self.assertEqual(cb.__class__, __lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        trainer.add_callback(__lowercase )
        expected_callbacks.insert(0, __lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        # We can also add, pop, or remove by instance
        lowercase__ = self.get_trainer()
        lowercase__ = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(__lowercase )
        expected_callbacks.remove(__lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        lowercase__ = self.get_trainer()
        lowercase__ = trainer.callback_handler.callbacks[0]
        lowercase__ = trainer.pop_callback(__lowercase )
        self.assertEqual(__lowercase, __lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )
        trainer.add_callback(__lowercase )
        expected_callbacks.insert(0, __lowercase )
        self.check_callbacks_equality(trainer.callback_handler.callbacks, __lowercase )

    # End-to-end: train under various logging/save/eval settings and compare the
    # recorded event trace to the computed expectation.
    def A__ ( self : Union[str, Any] ):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=__lowercase )
        lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        lowercase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(__lowercase, self.get_expected_events(__lowercase ) )
        # Independent log/save/eval
        lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5 )
        trainer.train()
        lowercase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(__lowercase, self.get_expected_events(__lowercase ) )
        lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5 )
        trainer.train()
        lowercase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(__lowercase, self.get_expected_events(__lowercase ) )
        lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps" )
        trainer.train()
        lowercase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(__lowercase, self.get_expected_events(__lowercase ) )
        lowercase__ = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch" )
        trainer.train()
        lowercase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(__lowercase, self.get_expected_events(__lowercase ) )
        # A bit of everything
        lowercase__ = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps", )
        trainer.train()
        lowercase__ = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(__lowercase, self.get_expected_events(__lowercase ) )
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
            lowercase__ = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], )
            assert str(__lowercase ) in warn_mock.call_args[0][0]
37
import math


def res(x: float, y: float) -> float:
    """Return a comparable magnitude for x**y using logarithms.

    We use the relation log10(x**y) = y * log10(x), so two powers can be
    compared without computing the (possibly huge) powers themselves.

    :param x: the base.
    :param y: the exponent.
    :return: y * log10(x) when both are non-zero; 0 when x == 0 (0 raised to
        any power is 0); 1 when y == 0 (any number raised to 0 is 1).

    Fixes over the previous revision: the two parameters shared one mangled
    name (a SyntaxError), the log call was `math.logaa` (undefined), and the
    ``__main__`` block called this function by a name that did not exist.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any number is 0
        return 0
    if y == 0:  # any number raised to 0 is 1
        return 1
    raise AssertionError("This should never happen")  # defensive; unreachable


if __name__ == "__main__":
    # Read two (base, power) pairs and report which power is larger.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # Compare the log-magnitudes of the two powers.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
37
1
# A CLIPTokenizer subclass that maps one "placeholder" token to several
# concrete tokens (multi-vector textual-inversion style), rewriting prompts
# before tokenization/encoding.
#
# NOTE(review): identifiers are machine-mangled — the base class `lowercase__`
# is undefined here (presumably CLIPTokenizer, which IS imported above), every
# method is named `A__` (only the last would survive on a real class), and
# method bodies reference descriptive names (`placeholder_token`, `output`,
# `tokens`, `text`, `self.token_map`, `self.try_adding_tokens`,
# `self.replace_placeholder_tokens_in_text`) that the mangling destroyed.
# Code preserved byte-for-byte; only comments were added.
import copy
import random

from transformers import CLIPTokenizer


class _snake_case ( lowercase__):
    def __init__( self : Optional[int], *__lowercase : Optional[Any], **__lowercase : str ):
        super().__init__(*__lowercase, **__lowercase )
        lowercase__ = {}  # NOTE(review): presumably `self.token_map = {}` before mangling

    # try_adding_tokens: add a token to the vocabulary, raising if it already exists.
    def A__ ( self : Union[str, Any], __lowercase : int, *__lowercase : List[str], **__lowercase : Dict ):
        lowercase__ = super().add_tokens(__lowercase, *__lowercase, **__lowercase )
        if num_added_tokens == 0:
            raise ValueError(
                F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                " `placeholder_token` that is not already in the tokenizer." )

    # add_placeholder_tokens: register a placeholder, expanding it into
    # `num_vec_per_token` suffixed tokens (`tok_0`, `tok_1`, ...) when > 1.
    def A__ ( self : Dict, __lowercase : List[Any], *__lowercase : List[Any], __lowercase : str=1, **__lowercase : int ):
        lowercase__ = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(__lowercase, *__lowercase, **__lowercase )
            output.append(__lowercase )
        else:
            lowercase__ = []
            for i in range(__lowercase ):
                lowercase__ = placeholder_token + F'''_{i}'''
                self.try_adding_tokens(__lowercase, *__lowercase, **__lowercase )
                output.append(__lowercase )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}keep placeholder tokens independent''' )
        lowercase__ = output

    # replace_placeholder_tokens_in_text: expand each registered placeholder in
    # `text` into its mapped tokens, optionally shuffling them and optionally
    # loading only a leading fraction (`prop_tokens_to_load`). Recurses over
    # lists of strings.
    def A__ ( self : Optional[int], __lowercase : Optional[Any], __lowercase : str=False, __lowercase : Dict=1.0 ):
        if isinstance(__lowercase, __lowercase ):
            lowercase__ = []
            for i in range(len(__lowercase ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=__lowercase ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                lowercase__ = self.token_map[placeholder_token]
                # keep only the leading fraction of the mapped tokens
                lowercase__ = tokens[: 1 + int(len(__lowercase ) * prop_tokens_to_load )]
                if vector_shuffle:
                    # shuffle a copy so the stored mapping keeps its order
                    lowercase__ = copy.copy(__lowercase )
                    random.shuffle(__lowercase )
                lowercase__ = text.replace(__lowercase, " ".join(__lowercase ) )
        return text

    # __call__: rewrite placeholders in the prompt, then delegate to the base
    # tokenizer's __call__.
    def __call__( self : Optional[int], __lowercase : Tuple, *__lowercase : Dict, __lowercase : Any=False, __lowercase : Optional[int]=1.0, **__lowercase : Tuple ):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                __lowercase, vector_shuffle=__lowercase, prop_tokens_to_load=__lowercase ),
            *__lowercase, **__lowercase, )

    # encode: same rewrite, delegating to the base tokenizer's encode.
    def A__ ( self : Optional[Any], __lowercase : Any, *__lowercase : Union[str, Any], __lowercase : int=False, __lowercase : Optional[int]=1.0, **__lowercase : Any ):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                __lowercase, vector_shuffle=__lowercase, prop_tokens_to_load=__lowercase ),
            *__lowercase, **__lowercase, )
37
import pickle

import numpy as np
from matplotlib import pyplot as plt


# Small from-scratch convolutional neural network (one conv layer, one pooling
# layer, two fully-connected BP layers) trained with hand-written gradient
# descent.  NOTE(review): the file is machine-obfuscated — every assignment
# target became `lowercase__` while later code reads the original names
# (`self.conva`, `self.wkj`, `rp`, `mse`, ...), so the class cannot run as
# written.  Code is kept token-identical; only comments are added.
class _snake_case :
    def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : int, __lowercase : Union[str, Any], __lowercase : str, __lowercase : List[Any], __lowercase : List[str]=0.2, __lowercase : List[str]=0.2 ):
        # sizes of the three BP layers (originally bp_num1/2/3 — obfuscation
        # collapsed them to one name)
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        lowercase__ = bp_numa
        # conv kernel spec: [kernel_size, n_kernels] + step
        lowercase__ = conva_get[:2]
        lowercase__ = conva_get[2]
        lowercase__ = size_pa
        # learning rates for weights and thresholds
        lowercase__ = rate_w
        lowercase__ = rate_t
        # random init in [-0.5, 0.5) for conv kernels and FC weight matrices
        lowercase__ = [
            np.mat(-1 * np.random.rand(self.conva[0], self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        lowercase__ = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa ) + 0.5 )
        # thresholds (biases) in [-1, 1)
        lowercase__ = -2 * np.random.rand(self.conva[1] ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1
        lowercase__ = -2 * np.random.rand(self.num_bpa ) + 1

    # Serialize all learned parameters to `save_path` with pickle.
    def A__ ( self : Any, __lowercase : List[str] ):  # save model dict with pickle
        lowercase__ = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(__lowercase, "wb" ) as f:
            pickle.dump(__lowercase, __lowercase )
        print(F'''Model saved: {save_path}''' )

    # Alternate constructor: rebuild a CNN instance from a pickled model file.
    # NOTE(review): pickle.load on an untrusted file executes arbitrary code
    # (hence the original `noqa: S301`).
    @classmethod
    def A__ ( cls : Dict, __lowercase : Union[str, Any] ):  # read saved model
        with open(__lowercase, "rb" ) as f:
            lowercase__ = pickle.load(__lowercase )  # noqa: S301
        lowercase__ = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        lowercase__ = model_dic.get("size_pooling1" )
        lowercase__ = model_dic.get("num_bp1" )
        lowercase__ = model_dic.get("num_bp2" )
        lowercase__ = model_dic.get("num_bp3" )
        lowercase__ = model_dic.get("rate_weight" )
        lowercase__ = model_dic.get("rate_thre" )
        # create model instance
        lowercase__ = CNN(__lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase, __lowercase )
        # modify model parameter
        lowercase__ = model_dic.get("w_conv1" )
        lowercase__ = model_dic.get("wkj" )
        lowercase__ = model_dic.get("vji" )
        lowercase__ = model_dic.get("thre_conv1" )
        lowercase__ = model_dic.get("thre_bp2" )
        lowercase__ = model_dic.get("thre_bp3" )
        return conv_ins

    # Sigmoid activation.
    def A__ ( self : str, __lowercase : List[Any] ):
        return 1 / (1 + np.exp(-1 * x ))

    # Round to 3 decimals (used when formatting predictions).
    def A__ ( self : List[str], __lowercase : Optional[Any] ):
        return round(__lowercase, 3 )

    # Convolution pass: slide each kernel over the image, apply sigmoid, and
    # also return the flattened input slices (needed later for the gradient).
    def A__ ( self : Optional[Any], __lowercase : Dict, __lowercase : Optional[int], __lowercase : Optional[int], __lowercase : Optional[Any], __lowercase : str ):
        # convolution process
        lowercase__ = convs[0]
        lowercase__ = convs[1]
        lowercase__ = np.shape(__lowercase )[0]
        # get the data slice of original image data, data_focus
        lowercase__ = []
        for i_focus in range(0, size_data - size_conv + 1, __lowercase ):
            for j_focus in range(0, size_data - size_conv + 1, __lowercase ):
                lowercase__ = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(__lowercase )
        # calculate the feature map of every single kernel, and saved as list of matrix
        lowercase__ = []
        lowercase__ = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(__lowercase ):
            lowercase__ = []
            for i_focus in range(len(__lowercase ) ):
                # elementwise product with the kernel minus its threshold
                lowercase__ = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(
                __lowercase, __lowercase )
            data_featuremap.append(__lowercase )
        # expanding the data slice to One dimenssion
        lowercase__ = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(__lowercase ) )
        lowercase__ = np.asarray(__lowercase )
        return focus_list, data_featuremap

    # Pooling pass: average (default) or max pooling over non-overlapping
    # `size_pooling` x `size_pooling` windows of each feature map.
    def A__ ( self : List[Any], __lowercase : Any, __lowercase : List[Any], __lowercase : Union[str, Any]="average_pool" ):
        # pooling process
        lowercase__ = len(featuremaps[0] )
        lowercase__ = int(size_map / size_pooling )
        lowercase__ = []
        for i_map in range(len(__lowercase ) ):
            lowercase__ = featuremaps[i_map]
            lowercase__ = []
            for i_focus in range(0, __lowercase, __lowercase ):
                for j_focus in range(0, __lowercase, __lowercase ):
                    lowercase__ = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(__lowercase ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(__lowercase ) )
            lowercase__ = np.asmatrix(__lowercase ).reshape(__lowercase, __lowercase )
            featuremap_pooled.append(__lowercase )
        return featuremap_pooled

    # Flatten a list of 2-D matrices into one 1-D numpy array.
    def A__ ( self : str, __lowercase : Optional[Any] ):
        # expanding three dimension data to one dimension list
        lowercase__ = []
        for i in range(len(__lowercase ) ):
            lowercase__ = np.shape(data[i] )
            lowercase__ = data[i].reshape(1, shapes[0] * shapes[1] )
            lowercase__ = data_listed.getA().tolist()[0]
            data_expanded.extend(__lowercase )
        lowercase__ = np.asarray(__lowercase )
        return data_expanded

    # Flatten a single matrix into a 1 x (rows*cols) row vector.
    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # expanding matrix to one dimension list
        lowercase__ = np.asarray(__lowercase )
        lowercase__ = np.shape(__lowercase )
        lowercase__ = data_mat.reshape(1, shapes[0] * shapes[1] )
        return data_expanded

    # Up-sample the pooled gradient back to feature-map size and multiply by
    # the sigmoid derivative out*(1-out) for each map.
    def A__ ( self : str, __lowercase : Tuple, __lowercase : List[Any], __lowercase : Any, __lowercase : Union[str, Any], __lowercase : Tuple ):
        lowercase__ = []
        lowercase__ = 0
        for i_map in range(__lowercase ):
            lowercase__ = np.ones((size_map, size_map) )
            for i in range(0, __lowercase, __lowercase ):
                for j in range(0, __lowercase, __lowercase ):
                    # NOTE(review): the original sliced-assignment target was
                    # lost to obfuscation (became a bare `lowercase__ =`).
                    lowercase__ = pd_pool[
                        i_pool
                    ]
                    lowercase__ = i_pool + 1
            lowercase__ = np.multiply(
                __lowercase, np.multiply(out_map[i_map], (1 - out_map[i_map]) ) )
            pd_all.append(__lowercase )
        return pd_all

    # Full training loop: forward (conv -> pool -> two BP layers), manual
    # backprop, repeated until `n_repeat` epochs or MSE < `error_accuracy`.
    def A__ ( self : Tuple, __lowercase : int, __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : Optional[Any], __lowercase : List[Any], __lowercase : List[str]=bool ):
        # model traning
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data ", np.shape(__lowercase )) )
        print((" - - Shape: Teach_Data ", np.shape(__lowercase )) )
        lowercase__ = 0
        lowercase__ = []
        # start MSE high so the while-loop always runs at least once
        lowercase__ = 1_0000
        while rp < n_repeat and mse >= error_accuracy:
            lowercase__ = 0
            print(F'''-------------Learning Time {rp}--------------''' )
            for p in range(len(__lowercase ) ):
                # print('------------Learning Image: %d--------------'%p)
                lowercase__ = np.asmatrix(datas_train[p] )
                lowercase__ = np.asarray(datas_teach[p] )
                # ---- forward pass ----
                lowercase__ , lowercase__ = self.convolute(
                    __lowercase,
                    self.conva,
                    self.w_conva,
                    self.thre_conva,
                    conv_step=self.step_conva, )
                lowercase__ = self.pooling(__lowercase, self.size_poolinga )
                lowercase__ = np.shape(__lowercase )
                lowercase__ = self._expand(__lowercase )
                lowercase__ = data_bp_input
                lowercase__ = np.dot(__lowercase, self.vji.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                lowercase__ = np.dot(__lowercase, self.wkj.T ) - self.thre_bpa
                lowercase__ = self.sig(__lowercase )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                lowercase__ = np.multiply(
                    (data_teach - bp_outa), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.multiply(
                    np.dot(__lowercase, self.wkj ), np.multiply(__lowercase, (1 - bp_outa) ) )
                lowercase__ = np.dot(__lowercase, self.vji )
                lowercase__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
                lowercase__ = pd_conva_pooled.T.getA().tolist()
                lowercase__ = self._calculate_gradient_from_pool(
                    __lowercase,
                    __lowercase,
                    shape_featuremapa[0],
                    shape_featuremapa[1],
                    self.size_poolinga, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    lowercase__ = self._expand_mat(pd_conva_all[k_conv] )
                    lowercase__ = self.rate_weight * np.dot(__lowercase, __lowercase )
                    lowercase__ = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    lowercase__ = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                lowercase__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                lowercase__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                lowercase__ = self.thre_bpa - pd_k_all * self.rate_thre
                lowercase__ = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                lowercase__ = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            lowercase__ = rp + 1
            lowercase__ = error_count / patterns
            all_mse.append(__lowercase )

        # Plot the per-epoch MSE curve against the target accuracy line.
        def draw_error():
            lowercase__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(__lowercase, "+-" )
            plt.plot(__lowercase, "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(__lowercase, alpha=0.5 )
            plt.show()

        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
        if draw_e:
            draw_error()
        return mse

    # Run the trained network over `datas_test` and return rounded outputs.
    def A__ ( self : List[str], __lowercase : Optional[int] ):
        # model predict
        lowercase__ = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data ", np.shape(__lowercase )) )
        for p in range(len(__lowercase ) ):
            lowercase__ = np.asmatrix(datas_test[p] )
            lowercase__ , lowercase__ = self.convolute(
                __lowercase,
                self.conva,
                self.w_conva,
                self.thre_conva,
                conv_step=self.step_conva, )
            lowercase__ = self.pooling(__lowercase, self.size_poolinga )
            lowercase__ = self._expand(__lowercase )
            lowercase__ = data_bp_input
            lowercase__ = bp_outa * self.vji.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            lowercase__ = bp_outa * self.wkj.T - self.thre_bpa
            lowercase__ = self.sig(__lowercase )
            produce_out.extend(bp_outa.getA().tolist() )
        lowercase__ = [list(map(self.do_round, __lowercase ) ) for each in produce_out]
        return np.asarray(__lowercase )

    # Debug helper: expose the intermediate conv and pooling outputs for one image.
    def A__ ( self : int, __lowercase : Any ):
        # return the data of image after convoluting process so we can check it out
        lowercase__ = np.asmatrix(__lowercase )
        lowercase__ , lowercase__ = self.convolute(
            __lowercase,
            self.conva,
            self.w_conva,
            self.thre_conva,
            conv_step=self.step_conva, )
        lowercase__ = self.pooling(__lowercase, self.size_poolinga )
        return data_conveda, data_pooleda


if __name__ == "__main__":
    pass
37
1
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


# Read-only fsspec filesystem that exposes a single compressed file as if it
# were a one-entry archive.  Concrete subclasses below only set the class
# attributes (root marker / protocol / compression / extension).
# NOTE(review): obfuscation renamed attribute assignment targets to
# `lowercase__` while readers use the originals (`self.file`, `self.dir_cache`,
# `self.compressed_name`, ...), so this cannot run as written.  Code kept
# token-identical; comments only.
class _snake_case ( lowercase__):
    UpperCamelCase__ : List[Any] =""""""
    UpperCamelCase__ : str =(
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    UpperCamelCase__ : str =None  # compression type in fsspec. ex: "gzip"
    UpperCamelCase__ : str =None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self : Any, __lowercase : str = "", __lowercase : Optional[str] = None, __lowercase : Optional[dict] = None, **__lowercase : int ):
        super().__init__(self, **__lowercase )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        lowercase__ = fsspec.open(
            __lowercase,
            mode="rb",
            protocol=__lowercase,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {} ),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        # name of the compressed file, without any chained-protocol suffix
        lowercase__ = os.path.basename(self.file.path.split("::" )[0] )
        # uncompressed name = compressed name with its last extension stripped
        lowercase__ = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        lowercase__ = None

    @classmethod
    def A__ ( cls : Any, __lowercase : List[str] ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(__lowercase ).lstrip("/" )

    # Lazily populate `dir_cache` with the single (uncompressed) entry.
    def A__ ( self : List[str] ):
        if self.dir_cache is None:
            lowercase__ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            lowercase__ = {f["name"]: f}

    # Return the whole decompressed content as bytes.
    def A__ ( self : Union[str, Any], __lowercase : str ):
        return self.file.open().read()

    # Open the (single) file; only binary read mode is supported.
    def A__ ( self : Optional[int], __lowercase : str, __lowercase : str = "rb", __lowercase : int=None, __lowercase : str=True, __lowercase : Tuple=None, **__lowercase : Dict, ):
        lowercase__ = self._strip_protocol(__lowercase )
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
        return self.file.open()


# bz2-compressed single-file filesystem.
class _snake_case ( lowercase__):
    UpperCamelCase__ : Any ="""bz2"""
    UpperCamelCase__ : Dict ="""bz2"""
    UpperCamelCase__ : Optional[int] =""".bz2"""


# gzip-compressed single-file filesystem.
class _snake_case ( lowercase__):
    UpperCamelCase__ : int ="""gzip"""
    UpperCamelCase__ : List[str] ="""gzip"""
    UpperCamelCase__ : Optional[Any] =""".gz"""


# lz4-compressed single-file filesystem.
class _snake_case ( lowercase__):
    UpperCamelCase__ : Any ="""lz4"""
    UpperCamelCase__ : Union[str, Any] ="""lz4"""
    UpperCamelCase__ : List[str] =""".lz4"""


# xz-compressed single-file filesystem.
class _snake_case ( lowercase__):
    UpperCamelCase__ : Optional[int] ="""xz"""
    UpperCamelCase__ : Any ="""xz"""
    UpperCamelCase__ : str =""".xz"""


# zstd-compressed single-file filesystem, with a workaround for a read-only
# `close` attribute on zstd's decompression reader.
class _snake_case ( lowercase__):
    UpperCamelCase__ : List[Any] ="""zstd"""
    UpperCamelCase__ : List[str] ="""zstd"""
    UpperCamelCase__ : Any =""".zst"""

    def __init__( self : Tuple, __lowercase : str, __lowercase : str = "rb", __lowercase : Optional[str] = None, __lowercase : Optional[dict] = None, __lowercase : int = DEFAULT_BLOCK_SIZE, **__lowercase : int, ):
        super().__init__(
            fo=__lowercase,
            mode=__lowercase,
            target_protocol=__lowercase,
            target_options=__lowercase,
            block_size=__lowercase,
            **__lowercase,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        lowercase__ = self.file.__enter__

        # Proxy that forwards everything to the wrapped file object so fsspec
        # can monkeypatch `close` on it.
        class _snake_case :
            def __init__( self : Optional[Any], __lowercase : List[str] ):
                lowercase__ = file_

            def __enter__( self : Optional[Any] ):
                self._file.__enter__()
                return self

            def __exit__( self : Optional[Any], *__lowercase : str, **__lowercase : Any ):
                self._file.__exit__(*__lowercase, **__lowercase )

            def __iter__( self : Dict ):
                return iter(self._file )

            def A__ ( self : Any ):
                return next(self._file )

            def __getattr__( self : Union[str, Any], __lowercase : Tuple ):
                return getattr(self._file, __lowercase )

        # replace file.__enter__ so the wrapper is returned instead
        def fixed_enter(*__lowercase : Union[str, Any], **__lowercase : Dict ):
            return WrappedFile(_enter(*__lowercase, **__lowercase ) )

        lowercase__ = fixed_enter
37
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


# Conversion script: timm BiT (ResNetv2) checkpoint -> Hugging Face BitForImageClassification.
# NOTE(review): machine-obfuscated — assignment targets became `lowercase__`
# while later statements read the original names (`idalabel`, `model`,
# `timm_model`, ...), so the script cannot run as written.  Code kept
# token-identical; comments only.
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)


# Build a BitConfig for `model_name`, with ImageNet-1k labels pulled from the Hub.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    lowercase__ = "huggingface/label-files"
    lowercase__ = "imagenet-1k-id2label.json"
    lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    # JSON keys are strings; convert to int ids and build the reverse map
    lowercase__ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
    lowercase__ = {v: k for k, v in idalabel.items()}
    lowercase__ = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    lowercase__ = BitConfig(
        conv_layer=SCREAMING_SNAKE_CASE_ ,
        num_labels=1000 ,
        idalabel=SCREAMING_SNAKE_CASE_ ,
        labelaid=SCREAMING_SNAKE_CASE_ ,
    )
    return config


# Map a timm parameter name onto the corresponding HF BiT parameter name.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    if "stem.conv" in name:
        lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        lowercase__ = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        lowercase__ = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        lowercase__ = "bit." + name
    if "bit" not in name and "classifier" not in name:
        lowercase__ = "bit.encoder." + name
    return name


# Download the standard COCO test image used to sanity-check vision conversions.
def __lowerCAmelCase ( ):
    lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
    return im


# Convert one timm BiT checkpoint, verify its outputs against the original
# model on a test image, then optionally save and/or push to the Hub.
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
    lowercase__ = get_config(SCREAMING_SNAKE_CASE_ )
    # load original model from timm
    lowercase__ = create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
    timm_model.eval()
    # load state_dict of original model
    lowercase__ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        # head weights are stored with extra singleton dims in timm
        lowercase__ = val.squeeze() if "head" in key else val
    # load HuggingFace model
    lowercase__ = BitForImageClassification(SCREAMING_SNAKE_CASE_ )
    model.eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # create image processor
    lowercase__ = create_transform(**resolve_data_config({} , model=SCREAMING_SNAKE_CASE_ ) )
    lowercase__ = transform.transforms
    lowercase__ = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # mirror the timm eval transform (resize -> center crop -> normalize)
    lowercase__ = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE_ ,
        size={"shortest_edge": timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=SCREAMING_SNAKE_CASE_ ,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
        do_normalize=SCREAMING_SNAKE_CASE_ ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() ,
    )
    lowercase__ = prepare_img()
    lowercase__ = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
    lowercase__ = processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    # verify logits
    with torch.no_grad():
        lowercase__ = model(SCREAMING_SNAKE_CASE_ )
        lowercase__ = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
    lowercase__ = timm_model(SCREAMING_SNAKE_CASE_ )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""resnetv2_50x1_bitm""",
        type=str,
        help="""Name of the BiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model to the hub.""",
    )
    lowercase_ = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


# Conversion script: original mLUKE research checkpoint -> HF LukeForMaskedLM +
# MLukeTokenizer, with output verification on two hard-coded probe sentences.
# NOTE(review): machine-obfuscated — assignment targets became `lowercase__`
# while readers use the original names (`metadata`, `config`, `state_dict`,
# ...), so the script cannot run as written.  Code kept token-identical;
# comments only.
@torch.no_grad()
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    # Load configuration defined in the metadata file
    with open(SCREAMING_SNAKE_CASE_ ) as metadata_file:
        lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
    lowercase__ = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["module"]
    # Load the entity vocab file
    lowercase__ = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ )
    # add an entry for [MASK2]
    lowercase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    lowercase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    lowercase__ = AddedToken("<ent>" , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
    lowercase__ = AddedToken("<ent2>" , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
    # rewrite tokenizer_config.json so the saved tokenizer loads as MLukeTokenizer
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , "tokenizer_config.json" ) , "r" ) as f:
        lowercase__ = json.load(SCREAMING_SNAKE_CASE_ )
    lowercase__ = "MLukeTokenizer"
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    lowercase__ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
    # Initialize the embeddings of the special tokens
    lowercase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    lowercase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    lowercase__ = state_dict["embeddings.word_embeddings.weight"]
    lowercase__ = word_emb[ent_init_index].unsqueeze(0 )
    lowercase__ = word_emb[enta_init_index].unsqueeze(0 )
    lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        lowercase__ = state_dict[bias_name]
        lowercase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        lowercase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        lowercase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            lowercase__ = f'''encoder.layer.{layer_index}.attention.self.'''
            lowercase__ = state_dict[prefix + matrix_name]
            lowercase__ = state_dict[prefix + matrix_name]
            lowercase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    lowercase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    lowercase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    lowercase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    lowercase__ = state_dict["entity_predictions.bias"]
    lowercase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    lowercase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    lowercase__ = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval()
    # decoder weights are tied; drop them before loading
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    lowercase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            lowercase__ = state_dict[key]
        else:
            lowercase__ = state_dict[key]
    lowercase__ , lowercase__ = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
    # only these keys may be unexpected/missing after the rename
    if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(SCREAMING_SNAKE_CASE_ ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    lowercase__ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task="entity_classification" )
    lowercase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    lowercase__ = (0, 9)
    lowercase__ = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors="pt" )
    lowercase__ = model(**SCREAMING_SNAKE_CASE_ )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        lowercase__ = torch.Size((1, 33, 768) )
        lowercase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        lowercase__ = torch.Size((1, 1, 768) )
        lowercase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            f''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    lowercase__ = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
    lowercase__ = "Tokyo is the capital of <mask>."
    lowercase__ = (24, 30)
    lowercase__ = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors="pt" )
    lowercase__ = model(**SCREAMING_SNAKE_CASE_ )
    lowercase__ = encoding["input_ids"][0].tolist()
    lowercase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    lowercase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE_ )
    lowercase__ = outputs.entity_logits[0][0].argmax().item()
    lowercase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(SCREAMING_SNAKE_CASE_ ) )
    model.save_pretrained(SCREAMING_SNAKE_CASE_ )


# Parse the original JSON-lines entity vocab into {"lang:entity_name": id},
# keeping special tokens ([MASK]/[PAD]/[UNK]) un-prefixed.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    lowercase__ = ["[MASK]", "[PAD]", "[UNK]"]
    lowercase__ = [json.loads(SCREAMING_SNAKE_CASE_ ) for line in open(SCREAMING_SNAKE_CASE_ )]
    lowercase__ = {}
    for entry in data:
        lowercase__ = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                lowercase__ = entity_id
                break
            lowercase__ = f'''{language}:{entity_name}'''
            lowercase__ = entity_id
    return new_mapping


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
    parser.add_argument(
        """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
    )
    parser.add_argument(
        """--entity_vocab_path""",
        default=None,
        type=str,
        help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
    )
    parser.add_argument(
        """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
    )
    lowercase_ = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
37
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


# SentencePiece-style Unigram tokenizer built from `tokenizers` primitives,
# with fixed ids for pad(0)/eos(1)/unk(2) and an EOS appended to every sequence.
# NOTE(review): machine-obfuscated — assignment targets became `lowercase__`
# while later statements read the original names (`self.special_tokens`,
# `tokenizer`, `trainer`, ...), so the class cannot run as written.  Code kept
# token-identical; comments only.
class _snake_case ( lowercase__):
    def __init__( self : Optional[Any], __lowercase : str = "▁", __lowercase : bool = True, __lowercase : Union[str, AddedToken] = "<unk>", __lowercase : Union[str, AddedToken] = "</s>", __lowercase : Union[str, AddedToken] = "<pad>", ):
        # fixed id assignment for the special tokens
        lowercase__ = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        # positional list of special tokens, indexed by their fixed ids
        lowercase__ = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            lowercase__ = token_dict["token"]
        lowercase__ = Tokenizer(Unigram() )
        # NMT + NFKC normalization, whitespace squeezing, lowercasing
        lowercase__ = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ), " " ),
                normalizers.Lowercase(),
            ] )
        # metaspace word-boundary marking, digit splitting, punctuation splitting
        lowercase__ = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase ),
                pre_tokenizers.Digits(individual_digits=__lowercase ),
                pre_tokenizers.Punctuation(),
            ] )
        lowercase__ = decoders.Metaspace(replacement=__lowercase, add_prefix_space=__lowercase )
        # post-processor appends EOS to every encoded sequence
        lowercase__ = TemplateProcessing(
            single=F'''$A {self.special_tokens["eos"]["token"]}''',
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
        lowercase__ = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(__lowercase, __lowercase )

    # Train the Unigram model from a file path (or list of paths).
    def A__ ( self : Union[str, Any], __lowercase : Union[str, List[str]], __lowercase : int = 8000, __lowercase : bool = True, ):
        lowercase__ = trainers.UnigramTrainer(
            vocab_size=__lowercase,
            special_tokens=self.special_tokens_list,
            show_progress=__lowercase, )
        if isinstance(__lowercase, __lowercase ):
            lowercase__ = [files]
        self._tokenizer.train(__lowercase, trainer=__lowercase )
        self.add_unk_id()

    # Train the Unigram model from an in-memory iterator of texts.
    def A__ ( self : List[Any], __lowercase : Union[Iterator[str], Iterator[Iterator[str]]], __lowercase : int = 8000, __lowercase : bool = True, ):
        lowercase__ = trainers.UnigramTrainer(
            vocab_size=__lowercase,
            special_tokens=self.special_tokens_list,
            show_progress=__lowercase, )
        self._tokenizer.train_from_iterator(__lowercase, trainer=__lowercase )
        self.add_unk_id()

    # Patch the trained model's JSON so the unk token uses the reserved id (2),
    # then reload the tokenizer from the patched JSON.
    def A__ ( self : str ):
        lowercase__ = json.loads(self._tokenizer.to_str() )
        lowercase__ = self.special_tokens["unk"]["id"]
        lowercase__ = Tokenizer.from_str(json.dumps(__lowercase ) )
37
1
import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, 
load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowercase_ = { """bart""": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), """bert""": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """bert-base-cased-finetuned-mrpc""": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """dpr""": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), """gpt2""": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlnet""": ( XLNetConfig, 
TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm""": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """xlm-roberta""": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """transfo-xl""": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """openai-gpt""": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """roberta""": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """layoutlm""": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), """roberta-large-mnli""": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """camembert""": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """flaubert""": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert""": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """distilbert-base-distilled-squad""": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert""": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """lxmert-visual-feature-encoder""": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """ctrl""": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """albert""": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """t5""": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """electra""": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), """wav2vec2""": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ): if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) lowercase__ = config_class.from_json_file(SCREAMING_SNAKE_CASE_ ) lowercase__ = True lowercase__ = True print(f'''Building TensorFlow model from configuration: {config}''' ) lowercase__ = model_class(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase__ = cached_file( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase__ = load_pytorch_checkpoint_in_tfa_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if compare_with_pt_model: lowercase__ = tf_model(tf_model.dummy_inputs , training=SCREAMING_SNAKE_CASE_ ) # build the network lowercase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" ) lowercase__ = pt_model_class.from_pretrained( pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ , state_dict=SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): lowercase__ = pt_model(**pt_model.dummy_inputs ) lowercase__ = pto[0].numpy() lowercase__ = 
tfo[0].numpy() lowercase__ = np.amax(np.abs(np_pt - np_tf ) ) print(f'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2e-2, f'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(f'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(SCREAMING_SNAKE_CASE_ , save_format="h5" ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , ): if args_model_type is None: lowercase__ = list(MODEL_CLASSES.keys() ) else: lowercase__ = [args_model_type] for j, model_type in enumerate(SCREAMING_SNAKE_CASE_ , start=1 ): print("=" * 100 ) print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE_ )}: {model_type}''' ) print("=" * 100 ) if model_type not in MODEL_CLASSES: raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase__ = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase__ = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , start=1 ): print("-" * 100 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase__ = model_shortcut_name elif only_convert_finetuned_models: print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE_ )}: {model_shortcut_name} - model_type {model_type}''' ) print("-" * 100 ) if config_shortcut_name in 
aws_config_map: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: lowercase__ = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase__ = cached_file(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , force_download=not use_cached_models ) else: lowercase__ = model_shortcut_name if os.path.isfile(SCREAMING_SNAKE_CASE_ ): lowercase__ = "converted_model" convert_pt_checkpoint_to_tf( model_type=SCREAMING_SNAKE_CASE_ , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE_ , config_file=SCREAMING_SNAKE_CASE_ , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE_ , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=SCREAMING_SNAKE_CASE_ , ) if remove_cached_files: os.remove(SCREAMING_SNAKE_CASE_ ) os.remove(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file.""" ) parser.add_argument( """--model_type""", default=None, type=str, help=( F'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' """convert all the models from AWS.""" ), ) parser.add_argument( """--pytorch_checkpoint_path""", default=None, type=str, help=( """Path to the PyTorch checkpoint path or shortcut name to download from AWS. """ """If not given, will download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--config_file""", default=None, type=str, help=( """The config json file corresponding to the pre-trained model. \n""" """This specifies the model architecture. 
If not given and """ """--pytorch_checkpoint_path is not given or is a shortcut name """ """use the configuration associated to the shortcut name on the AWS""" ), ) parser.add_argument( """--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions.""" ) parser.add_argument( """--use_cached_models""", action="""store_true""", help="""Use cached models if possible instead of updating to latest checkpoint versions.""", ) parser.add_argument( """--remove_cached_files""", action="""store_true""", help="""Remove pytorch models after conversion (save memory when converting in batches).""", ) parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""") lowercase_ = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
37
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowercase__ = f'''{src_lang}-{tgt_lang}''' lowercase__ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). 
The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(f'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowercase_ = Path(__file__).resolve().parent.parent.parent lowercase_ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""") lowercase_ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
37
1
from ....utils import logging lowercase_ = logging.get_logger(__name__) class _snake_case ( lowercase__): def __init__( self : Union[str, Any], __lowercase : Any, __lowercase : Tuple=None, __lowercase : int=2048 ): lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
37
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


# Test suite for TransfoXLTokenizer (whitespace/punctuation splitting,
# moses-style number protection, lower-casing, and added-token movement).
class _snake_case ( lowercase__ , unittest.TestCase):
    # NOTE(review): all three class attributes share the obfuscated name
    # `UpperCamelCase__`, so only the final assignment (False) survives at
    # class-creation time — these were presumably three distinct mixin flags
    # (tokenizer class + two booleans); confirm against the original file.
    UpperCamelCase__ : Dict =TransfoXLTokenizer
    UpperCamelCase__ : List[Any] =False
    UpperCamelCase__ : List[Any] =False

    def A__ ( self : Union[str, Any] ):
        # setUp: write a tiny vocabulary file into the test's tmp directory.
        # NOTE(review): assignments go to the obfuscated local `lowercase__`
        # while later lines read `vocab_tokens` / `self.vocab_file` — broken
        # by obfuscation; kept byte-identical here.
        super().setUp()
        lowercase__ = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def A__ ( self : Union[str, Any], **__lowercase : Any ):
        # Factory: build a tokenizer from the tmp vocab (lower_case forced on).
        lowercase__ = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase )

    def A__ ( self : Tuple, __lowercase : Optional[int] ):
        # Pair a raw input string with its expected normalized output.
        lowercase__ = "<unk> UNwanted , running"
        lowercase__ = "<unk> unwanted, running"
        return input_text, output_text

    def A__ ( self : str ):
        # Lower-cased tokenization splits on punctuation and maps to vocab ids.
        lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase )
        lowercase__ = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(__lowercase, ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] )

    def A__ ( self : Tuple ):
        # With lower_case on, mixed-case input comes out fully lower-cased.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["hello", "!", "how", "are", "you", "?"] )

    def A__ ( self : Tuple ):
        # With lower_case off, original casing is preserved.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def A__ ( self : str ):
        # Moses-style protection: '-' ',' '.' inside numbers become @-@ @,@ @.@
        # and the round-trip through convert_tokens_to_string restores the text.
        lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
        lowercase__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        lowercase__ = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase )
        self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase )

    def A__ ( self : List[str] ):
        # move_added_token relocates (not copies) a freshly added token to id 1.
        lowercase__ = self.get_tokenizer()
        lowercase__ = len(__lowercase )

        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1", 1 )

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__lowercase ), original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1" ), [1] )
        self.assertEqual(tokenizer.decode([1] ), "new1" )
37
1
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) lowercase_ = logging.getLogger() lowercase_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _snake_case ( lowercase__): def A__ ( self : Tuple, __lowercase : Tuple ): os.makedirs(__lowercase, exist_ok=__lowercase ) lowercase__ = {"source": "What is love ?", "target": "life"} lowercase__ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: lowercase__ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__lowercase, F'''{split}.{field}''' ), "w" ) as f: f.write(__lowercase ) def A__ ( self : int, __lowercase : int, __lowercase : str = "pytorch" ): lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = os.path.join(__lowercase, "output" ) lowercase__ = os.path.join(__lowercase, "data" ) self._create_dummy_data(data_dir=__lowercase ) lowercase__ = F''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ '''.split() if gpus > 0: testargs.append(F'''--gpus={gpus}''' ) if is_apex_available(): 
testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) lowercase__ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__lowercase, env=self.get_env() ) lowercase__ = os.path.join(__lowercase, "metrics.json" ) with open(__lowercase ) as f: lowercase__ = json.load(__lowercase ) return result @require_torch_gpu def A__ ( self : Optional[Any] ): lowercase__ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 ) @require_torch_multi_gpu def A__ ( self : Optional[Any] ): lowercase__ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 ) @require_torch_gpu @require_ray def A__ ( self : Tuple ): lowercase__ = self._run_finetune(gpus=1, distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 ) @require_torch_multi_gpu @require_ray def A__ ( self : Tuple ): lowercase__ = self._run_finetune(gpus=1, distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2 )
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowerCAmelCase ( ): lowercase__ = HfArgumentParser(SCREAMING_SNAKE_CASE_ ) lowercase__ = parser.parse_args_into_dataclasses()[0] lowercase__ = TensorFlowBenchmark(args=SCREAMING_SNAKE_CASE_ ) try: lowercase__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase__ = "Arg --no_{0} is no longer used, please use --no-{0} instead." lowercase__ = " ".join(str(SCREAMING_SNAKE_CASE_ ).split(" " )[:-1] ) lowercase__ = "" lowercase__ = eval(str(SCREAMING_SNAKE_CASE_ ).split(" " )[-1] ) lowercase__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > 0: lowercase__ = full_error_msg + begin_error_msg + str(SCREAMING_SNAKE_CASE_ ) raise ValueError(SCREAMING_SNAKE_CASE_ ) benchmark.run() if __name__ == "__main__": main()
37
1
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE_ , x % y ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 20 ): lowercase__ = 1 for i in range(1 , n + 1 ): lowercase__ = lcm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return g if __name__ == "__main__": print(F'{solution() = }')
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger lowercase_ = """<<<<<<< This should probably be modified because it mentions: """ lowercase_ = """======= >>>>>>> """ lowercase_ = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] lowercase_ = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class _snake_case ( lowercase__): @staticmethod def A__ ( __lowercase : ArgumentParser ): lowercase__ = parser.add_parser( "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", ) train_parser.add_argument( "--tfds_path", type=__lowercase, required=__lowercase, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", ) train_parser.add_argument( "--datasets_directory", type=__lowercase, required=__lowercase, help="Path to the HuggingFace Datasets folder." 
) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple, __lowercase : str, __lowercase : str, *__lowercase : Tuple ): lowercase__ = get_logger("datasets-cli/converting" ) lowercase__ = tfds_path lowercase__ = datasets_directory def A__ ( self : Any ): if os.path.isdir(self._tfds_path ): lowercase__ = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowercase__ = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." ) lowercase__ = os.path.abspath(self._datasets_directory ) self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) lowercase__ = [] lowercase__ = [] lowercase__ = {} if os.path.isdir(self._tfds_path ): lowercase__ = os.listdir(__lowercase ) else: lowercase__ = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F'''Looking at file {f_name}''' ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) if not os.path.isfile(__lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(__lowercase, encoding="utf-8" ) as f: lowercase__ = f.readlines() lowercase__ = [] lowercase__ = False lowercase__ = False lowercase__ = [] for line in lines: lowercase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__ = "import datasets\n" elif "import tensorflow" in out_line: # order is important here lowercase__ = "" continue elif "from absl import logging" in out_line: lowercase__ = "from datasets import logging\n" elif "getLogger" in out_line: lowercase__ = out_line.replace("getLogger", "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowercase__ 
= True lowercase__ = list(filter(lambda __lowercase : e in out_line, __lowercase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__lowercase ) + "\n" ) out_lines.append(__lowercase ) out_lines.append(__lowercase ) continue else: for pattern, replacement in TO_CONVERT: lowercase__ = re.sub(__lowercase, __lowercase, __lowercase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", __lowercase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) lowercase__ = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__ = True out_lines.append(__lowercase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__ = f_name.replace(".py", "" ) lowercase__ = os.path.join(__lowercase, __lowercase ) lowercase__ = os.path.join(__lowercase, __lowercase ) os.makedirs(__lowercase, exist_ok=__lowercase ) self._logger.info(F'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(__lowercase ) if needs_manual_update: with_manual_update.append(__lowercase ) with open(__lowercase, "w", encoding="utf-8" ) as f: f.writelines(__lowercase ) self._logger.info(F'''Converted in {output_file}''' ) for utils_file in utils_files: try: lowercase__ = os.path.basename(__lowercase ) lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )] self._logger.info(F'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(__lowercase, __lowercase ) except KeyError: self._logger.error(F'''Cannot find destination folder for {utils_file}. 
Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
# Knowledge-distillation training launcher (DistilBERT-style): parses CLI
# arguments, loads a teacher and a student model plus a binarized dataset,
# then runs the Distiller training loop.
#
# NOTE(review): identifiers in this file are machine-mangled. All functions are
# named `__lowerCAmelCase`, locals are assigned to `lowercase__` but read back
# under their original names (`args`, `parser`, `tokenizer`, ...), and several
# `def`s repeat the parameter name `SCREAMING_SNAKE_CASE_` (a SyntaxError).
# Code is kept byte-identical below; confirm against the upstream
# examples/research_projects/distillation/train.py before running.
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch

from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPTaConfig,
    GPTaLMHeadModel,
    GPTaTokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


# Maps a model-type string to (config class, LM head class, tokenizer class).
lowercase_ = {
    """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    """bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
    """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}


# sanity_checks: validate that the parsed CLI options are mutually consistent
# before any expensive model loading happens.  Raises AssertionError otherwise.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    # The --mlm flag must agree with the MLM/CLM loss weights.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts )
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    # Only distilbert<-bert cross-type distillation is supported.
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config )
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights )

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    # All loss weights must be non-negative, and at least one strictly positive.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


# freeze_pos_embeddings: disable gradient updates on the student's position
# embeddings (roberta / gpt2 students).
# NOTE(review): duplicate parameter name `SCREAMING_SNAKE_CASE_` is a
# SyntaxError, and the `lowercase__ = False` assignments have lost their
# original attribute targets (presumably `student....requires_grad = False`)
# — confirm against upstream.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    if args.student_type == "roberta":
        lowercase__ = False
    elif args.student_type == "gpt2":
        lowercase__ = False


# freeze_token_type_embeddings: disable gradient updates on the student's
# token-type embeddings (roberta students only).  Same mangling caveats as above.
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
    if args.student_type == "roberta":
        lowercase__ = False


# main: parse CLI arguments, set up the run directory, load teacher/student,
# build the dataset, and launch distillation.
def __lowerCAmelCase ( ):
    lowercase__ = argparse.ArgumentParser(description="Training" )
    parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )

    parser.add_argument(
        "--dump_path" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file" ,
        type=SCREAMING_SNAKE_CASE_ ,
        required=SCREAMING_SNAKE_CASE_ ,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." ,
    )

    parser.add_argument(
        "--student_type" ,
        type=SCREAMING_SNAKE_CASE_ ,
        choices=["distilbert", "roberta", "gpt2"] ,
        required=SCREAMING_SNAKE_CASE_ ,
        help="The student type (DistilBERT, RoBERTa)." ,
    )
    parser.add_argument("--student_config" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to the student configuration." )
    parser.add_argument(
        "--student_pretrained_weights" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=SCREAMING_SNAKE_CASE_ , help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The teacher model." )

    # Distillation loss weights (see Distiller for how each term is combined).
    parser.add_argument("--temperature" , default=2.0 , type=SCREAMING_SNAKE_CASE_ , help="Temperature for the softmax temperature." )
    parser.add_argument(
        "--alpha_ce" , default=0.5 , type=SCREAMING_SNAKE_CASE_ , help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm" ,
        default=0.0 ,
        type=SCREAMING_SNAKE_CASE_ ,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." ,
    )
    parser.add_argument("--alpha_clm" , default=0.5 , type=SCREAMING_SNAKE_CASE_ , help="Linear weight for the CLM loss. Must be >=0." )
    parser.add_argument("--alpha_mse" , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help="Linear weight of the MSE loss. Must be >=0." )
    parser.add_argument(
        "--alpha_cos" , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    # Masked-LM masking strategy options.
    parser.add_argument(
        "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop" ,
        default=0.15 ,
        type=SCREAMING_SNAKE_CASE_ ,
        help="Proportion of tokens for which we need to make a prediction." ,
    )
    parser.add_argument("--word_mask" , default=0.8 , type=SCREAMING_SNAKE_CASE_ , help="Proportion of tokens to mask out." )
    parser.add_argument("--word_keep" , default=0.1 , type=SCREAMING_SNAKE_CASE_ , help="Proportion of tokens to keep." )
    parser.add_argument("--word_rand" , default=0.1 , type=SCREAMING_SNAKE_CASE_ , help="Proportion of tokens to randomly replace." )
    parser.add_argument(
        "--mlm_smoothing" ,
        default=0.7 ,
        type=SCREAMING_SNAKE_CASE_ ,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." ,
    )
    parser.add_argument("--token_counts" , type=SCREAMING_SNAKE_CASE_ , help="The token counts in the data_file for MLM." )

    parser.add_argument(
        "--restrict_ce_to_mask" ,
        action="store_true" ,
        help="If true, compute the distillation loss only the [MLM] prediction distribution." ,
    )
    parser.add_argument(
        "--freeze_pos_embs" ,
        action="store_true" ,
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." ,
    )
    parser.add_argument(
        "--freeze_token_type_embds" ,
        action="store_true" ,
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." ,
    )

    # Optimization hyper-parameters.
    parser.add_argument("--n_epoch" , type=SCREAMING_SNAKE_CASE_ , default=3 , help="Number of pass on the whole dataset." )
    parser.add_argument("--batch_size" , type=SCREAMING_SNAKE_CASE_ , default=5 , help="Batch size (for each process)." )
    # NOTE: store_false => grouping by size is ON by default; passing the flag disables it.
    parser.add_argument(
        "--group_by_size" ,
        action="store_false" ,
        help="If true, group sequences that have similar length into the same batch. Default is true." ,
    )
    parser.add_argument(
        "--gradient_accumulation_steps" ,
        type=SCREAMING_SNAKE_CASE_ ,
        default=50 ,
        help="Gradient accumulation for larger training batches." ,
    )
    parser.add_argument("--warmup_prop" , default=0.05 , type=SCREAMING_SNAKE_CASE_ , help="Linear warmup proportion." )
    parser.add_argument("--weight_decay" , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help="Weight decay if we apply some." )
    parser.add_argument("--learning_rate" , default=5e-4 , type=SCREAMING_SNAKE_CASE_ , help="The initial learning rate for Adam." )
    parser.add_argument("--adam_epsilon" , default=1e-6 , type=SCREAMING_SNAKE_CASE_ , help="Epsilon for Adam optimizer." )
    parser.add_argument("--max_grad_norm" , default=5.0 , type=SCREAMING_SNAKE_CASE_ , help="Max gradient norm." )
    parser.add_argument("--initializer_range" , default=0.02 , type=SCREAMING_SNAKE_CASE_ , help="Random initialization range." )

    # Mixed precision / distributed options.
    parser.add_argument(
        "--fp16" ,
        action="store_true" ,
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" ,
    )
    parser.add_argument(
        "--fp16_opt_level" ,
        type=SCREAMING_SNAKE_CASE_ ,
        default="O1" ,
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) ,
    )
    parser.add_argument("--n_gpu" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="Number of GPUs in the node." )
    parser.add_argument("--local_rank" , type=SCREAMING_SNAKE_CASE_ , default=-1 , help="Distributed training - Local rank" )
    parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE_ , default=56 , help="Random seed" )

    parser.add_argument("--log_interval" , type=SCREAMING_SNAKE_CASE_ , default=500 , help="Tensorboard logging interval." )
    parser.add_argument("--checkpoint_interval" , type=SCREAMING_SNAKE_CASE_ , default=4000 , help="Checkpoint interval." )
    lowercase__ = parser.parse_args()
    sanity_checks(SCREAMING_SNAKE_CASE_ )

    # ARGS #
    init_gpu_params(SCREAMING_SNAKE_CASE_ )
    set_seed(SCREAMING_SNAKE_CASE_ )
    if args.is_master:
        # Only the master process prepares (and possibly wipes) the dump directory.
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path )

        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )

        # SAVE PARAMS #
        logger.info(f'''Param: {args}''' )
        with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
            json.dump(vars(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , indent=4 )
        git_log(args.dump_path )

    lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[args.student_type]
    lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    # Build a name -> id map for the teacher tokenizer's special tokens.
    lowercase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    lowercase__ = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        lowercase__ = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE_ )
        lowercase__ = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''' )
    lowercase__ = special_tok_ids
    lowercase__ = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file , "rb" ) as fp:
        lowercase__ = pickle.load(SCREAMING_SNAKE_CASE_ )

    if args.mlm:
        # Token-frequency smoothing for MLM masking; special tokens are never masked.
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , "rb" ) as fp:
            lowercase__ = pickle.load(SCREAMING_SNAKE_CASE_ )

        lowercase__ = np.maximum(SCREAMING_SNAKE_CASE_ , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            lowercase__ = 0.0  # do not predict special tokens
        lowercase__ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
    else:
        lowercase__ = None

    lowercase__ = LmSeqsDataset(params=SCREAMING_SNAKE_CASE_ , data=SCREAMING_SNAKE_CASE_ )
    logger.info("Data loader created." )

    # STUDENT #
    logger.info(f'''Loading student config from {args.student_config}''' )
    lowercase__ = student_config_class.from_pretrained(args.student_config )
    lowercase__ = True

    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        lowercase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE_ )
    else:
        lowercase__ = student_model_class(SCREAMING_SNAKE_CASE_ )

    if args.n_gpu > 0:
        student.to(f'''cuda:{args.local_rank}''' )
    logger.info("Student loaded." )

    # TEACHER #
    lowercase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE_ )
    if args.n_gpu > 0:
        teacher.to(f'''cuda:{args.local_rank}''' )
    logger.info(f'''Teacher loaded from {args.teacher_name}.''' )

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    # SANITY CHECKS #
    # Teacher and student must share vocabulary/hidden sizes for distillation losses.
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    lowercase__ = Distiller(
        params=SCREAMING_SNAKE_CASE_ , dataset=SCREAMING_SNAKE_CASE_ , token_probs=SCREAMING_SNAKE_CASE_ , student=SCREAMING_SNAKE_CASE_ , teacher=SCREAMING_SNAKE_CASE_
    )
    distiller.train()
    logger.info("Let's go get some drinks." )


if __name__ == "__main__":
    main()
37
# "Fast" (tokenizers-backend) LED tokenizer module.
#
# NOTE(review): identifiers are machine-mangled — the class is `_snake_case`,
# its base `lowercase__` is undefined here (upstream: PreTrainedTokenizerFast),
# every method is named `A__` (later defs shadow earlier ones), and several
# signatures repeat the parameter name `__lowercase` (a SyntaxError). Locals
# are assigned to `lowercase__` but read back under their original names.
# Code is kept byte-identical; comments below describe the visible logic.
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


lowercase_ = logging.get_logger(__name__)

# Expected resource file names for a saved tokenizer.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

# Download URLs for the pretrained LED checkpoint's tokenizer files.
lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

# Maximum model input length per checkpoint.
lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}


class _snake_case ( lowercase__):
    """Fast LED tokenizer backed by the `tokenizers` library; slow counterpart is LEDTokenizer."""

    UpperCamelCase__ : int =VOCAB_FILES_NAMES
    UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : List[Any] =LEDTokenizer
    UpperCamelCase__ : Tuple =["""input_ids""", """attention_mask"""]

    # NOTE(review): the three leading `__lowercase` params correspond upstream to
    # vocab_file / merges_file / tokenizer_file — confirm before relying on order.
    def __init__(
        self : Optional[Any],
        __lowercase : Optional[Any]=None,
        __lowercase : Dict=None,
        __lowercase : Tuple=None,
        __lowercase : Union[str, Any]="replace",
        __lowercase : Tuple="<s>",
        __lowercase : Optional[Any]="</s>",
        __lowercase : Tuple="</s>",
        __lowercase : List[str]="<s>",
        __lowercase : Tuple="<unk>",
        __lowercase : Dict="<pad>",
        __lowercase : Dict="<mask>",
        __lowercase : Any=False,
        __lowercase : Any=True,
        **__lowercase : List[Any],
    ):
        super().__init__(
            __lowercase,
            __lowercase,
            tokenizer_file=__lowercase,
            errors=__lowercase,
            bos_token=__lowercase,
            eos_token=__lowercase,
            sep_token=__lowercase,
            cls_token=__lowercase,
            unk_token=__lowercase,
            pad_token=__lowercase,
            mask_token=__lowercase,
            add_prefix_space=__lowercase,
            trim_offsets=__lowercase,
            **__lowercase,
        )

        # Re-create the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", __lowercase ) != add_prefix_space:
            lowercase__ = getattr(__lowercase, pre_tok_state.pop("type" ) )
            lowercase__ = add_prefix_space
            lowercase__ = pre_tok_class(**__lowercase )

        lowercase__ = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        # Sync the backend post-processor's add_prefix_space / trim_offsets state too.
        lowercase__ = "post_processor"
        lowercase__ = getattr(self.backend_tokenizer, __lowercase, __lowercase )
        if tokenizer_component_instance:
            lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowercase__ = tuple(state["sep"] )
            if "cls" in state:
                lowercase__ = tuple(state["cls"] )

            lowercase__ = False

            if state.get("add_prefix_space", __lowercase ) != add_prefix_space:
                lowercase__ = add_prefix_space
                lowercase__ = True

            if state.get("trim_offsets", __lowercase ) != trim_offsets:
                lowercase__ = trim_offsets
                lowercase__ = True

            if changes_to_apply:
                # Rebuild and re-attach the post-processor with the updated state.
                lowercase__ = getattr(__lowercase, state.pop("type" ) )
                lowercase__ = component_class(**__lowercase )
                setattr(self.backend_tokenizer, __lowercase, __lowercase )

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def A__ ( self : str ):
        # mask_token getter: returns the mask token string, or None (with an
        # error log) if it was never set.
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def A__ ( self : Optional[int], __lowercase : Dict ):
        # mask_token setter: wraps plain strings in AddedToken before storing.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else value
        lowercase__ = value

    def A__ ( self : Any, *__lowercase : List[Any], **__lowercase : Optional[Any] ):
        # _batch_encode_plus: pretokenized input requires add_prefix_space=True.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : int, *__lowercase : Union[str, Any], **__lowercase : List[str] ):
        # _encode_plus: same add_prefix_space guard for single inputs.
        lowercase__ = kwargs.get("is_split_into_words", __lowercase )

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*__lowercase, **__lowercase )

    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : Optional[str] = None ):
        # save_vocabulary: delegates to the backend model's save and returns the file names.
        lowercase__ = self._tokenizer.model.save(__lowercase, name=__lowercase )
        return tuple(__lowercase )

    def A__ ( self : List[str], __lowercase : int, __lowercase : Optional[int]=None ):
        # build_inputs_with_special_tokens: <s> A </s> (</s> B </s> for pairs).
        lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: LED uses all-zero token type ids.
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ (
        self : Union[str, Any],
        __lowercase : Union[Dict[str, EncodedInput], BatchEncoding],
        __lowercase : Optional[int] = None,
        __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        __lowercase : Optional[int] = None,
        __lowercase : Optional[bool] = None,
    ):
        # _pad: standard padding, plus padding of `global_attention_mask` with -1
        # so it matches the padded input length.
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase,
            max_length=__lowercase,
            padding_strategy=__lowercase,
            pad_to_multiple_of=__lowercase,
            return_attention_mask=__lowercase,
        )

        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )

            if needs_to_be_padded:
                lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )

        return encoded_inputs
37
1
"""Convert a TensorFlow T5 checkpoint into a PyTorch model directory.

Fixes applied: the original (machine-mangled) version declared the function as
``def __lowerCAmelCase(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)``
— a SyntaxError from the duplicated parameter name — and its body read
undefined names (``config``, ``model``, ``pytorch_dump_path``, ``parser``,
``args``) while the ``__main__`` guard called the undefined
``convert_tf_checkpoint_to_pytorch``. Real names are restored below; the old
mangled name is kept as a backward-compatible alias.
"""
import argparse

from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Build a T5 model from `config_file`, load TF weights from
    `tf_checkpoint_path`, and save the PyTorch model to `pytorch_dump_path`.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param config_file: JSON config describing the model architecture.
    :param pytorch_dump_path: output directory for the converted model.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


# Backward-compatible alias for the previous (mangled) entry-point name.
__lowerCAmelCase = convert_tf_checkpoint_to_pytorch


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
37
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for the `accelerate` command-line tool.

Fixes applied: the original (machine-mangled) version named the function
``__lowerCAmelCase`` while the ``__main__`` guard called the undefined
``main``; locals were assigned to ``lowercase__`` but read back as ``parser``
/ ``args``; and ``allow_abbrev`` / the subparser arguments referenced the
undefined ``SCREAMING_SNAKE_CASE_``. Real names are restored below.
"""
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    """Parse the CLI invocation and dispatch to the selected sub-command."""
    # allow_abbrev=False: sub-commands define their own flags, so prefix
    # abbreviation at the top level would be ambiguous.
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    # No sub-command selected: show usage and exit non-zero.
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


# Backward-compatible alias for the previous (mangled) entry-point name.
__lowerCAmelCase = main


if __name__ == "__main__":
    main()
37
1
# "Slow" (pure-Python, byte-level BPE) LED tokenizer module.
#
# NOTE(review): identifiers are machine-mangled — module functions are named
# `__lowerCAmelCase`, the class is `_snake_case` with undefined base
# `lowercase__` (upstream: PreTrainedTokenizer), every method is `A__`, locals
# are assigned to `lowercase__` but read back under their original names, and
# several signatures repeat a parameter name (a SyntaxError). Code is kept
# byte-identical; comments describe the visible logic.
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


lowercase_ = logging.get_logger(__name__)

# Expected resource file names for a saved tokenizer.
lowercase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

# See all LED models at https://huggingface.co/models?filter=LED
lowercase_ = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}

# Maximum model input length per checkpoint.
lowercase_ = {
    """allenai/led-base-16384""": 1_6384,
}


# bytes_to_unicode: build the byte -> printable-unicode mapping used by the
# byte-level BPE (maps all 256 byte values to visible characters).
# NOTE(review): the mangled body assigns only `lowercase__` yet reads `bs`,
# `cs`, and `n` — confirm against the upstream bytes_to_unicode.
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __lowerCAmelCase ( ):
    lowercase__ = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    lowercase__ = bs[:]
    lowercase__ = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(SCREAMING_SNAKE_CASE_ )
            cs.append(2**8 + n )
            n += 1
    lowercase__ = [chr(SCREAMING_SNAKE_CASE_ ) for n in cs]
    return dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )


# get_pairs: return the set of adjacent symbol pairs in a word (tuple of symbols).
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    lowercase__ = set()
    lowercase__ = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        lowercase__ = char
    return pairs


class _snake_case ( lowercase__):
    """Byte-level BPE tokenizer for LED (GPT-2/BART-style merges + vocab files)."""

    UpperCamelCase__ : Any =VOCAB_FILES_NAMES
    UpperCamelCase__ : Any =PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ : int =["""input_ids""", """attention_mask"""]

    # NOTE(review): the two leading `__lowercase` params correspond upstream to
    # vocab_file / merges_file — confirm before relying on order.
    def __init__(
        self : int,
        __lowercase : List[str],
        __lowercase : Union[str, Any],
        __lowercase : str="replace",
        __lowercase : int="<s>",
        __lowercase : Dict="</s>",
        __lowercase : List[str]="</s>",
        __lowercase : int="<s>",
        __lowercase : List[Any]="<unk>",
        __lowercase : Dict="<pad>",
        __lowercase : Union[str, Any]="<mask>",
        __lowercase : int=False,
        **__lowercase : int,
    ):
        # Wrap plain-string special tokens in AddedToken.
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else bos_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else eos_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else sep_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else cls_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else unk_token
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        lowercase__ = AddedToken(__lowercase, lstrip=__lowercase, rstrip=__lowercase ) if isinstance(__lowercase, __lowercase ) else mask_token

        super().__init__(
            errors=__lowercase,
            bos_token=__lowercase,
            eos_token=__lowercase,
            unk_token=__lowercase,
            sep_token=__lowercase,
            cls_token=__lowercase,
            pad_token=__lowercase,
            mask_token=__lowercase,
            add_prefix_space=__lowercase,
            **__lowercase,
        )

        # Load token -> id vocabulary and build the inverse map.
        with open(__lowercase, encoding="utf-8" ) as vocab_handle:
            lowercase__ = json.load(__lowercase )
        lowercase__ = {v: k for k, v in self.encoder.items()}
        lowercase__ = errors  # how to handle errors in decoding
        lowercase__ = bytes_to_unicode()
        lowercase__ = {v: k for k, v in self.byte_encoder.items()}
        # Load BPE merge ranks (first line of merges.txt is a header, last is empty).
        with open(__lowercase, encoding="utf-8" ) as merges_handle:
            lowercase__ = merges_handle.read().split("\n" )[1:-1]
        lowercase__ = [tuple(merge.split() ) for merge in bpe_merges]
        lowercase__ = dict(zip(__lowercase, range(len(__lowercase ) ) ) )
        lowercase__ = {}
        lowercase__ = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase__ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def A__ ( self : Tuple ):
        # vocab_size: number of entries in the base vocabulary.
        return len(self.encoder )

    def A__ ( self : str ):
        # get_vocab: base vocabulary merged with any added tokens.
        return dict(self.encoder, **self.added_tokens_encoder )

    def A__ ( self : Optional[int], __lowercase : Optional[int] ):
        # bpe: apply byte-pair merges to one pre-token; results are memoized in self.cache.
        if token in self.cache:
            return self.cache[token]
        lowercase__ = tuple(__lowercase )
        lowercase__ = get_pairs(__lowercase )

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked adjacent pair until none remain in bpe_ranks.
            lowercase__ = min(__lowercase, key=lambda __lowercase : self.bpe_ranks.get(__lowercase, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase__ , lowercase__ = bigram
            lowercase__ = []
            lowercase__ = 0
            while i < len(__lowercase ):
                try:
                    lowercase__ = word.index(__lowercase, __lowercase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase__ = j

                if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase__ = tuple(__lowercase )
            lowercase__ = new_word
            if len(__lowercase ) == 1:
                break
            else:
                lowercase__ = get_pairs(__lowercase )
        lowercase__ = " ".join(__lowercase )
        lowercase__ = word
        return word

    def A__ ( self : Union[str, Any], __lowercase : Union[str, Any] ):
        # _tokenize: split text with the pre-tokenization regex, byte-encode each
        # piece, then BPE-merge it.
        lowercase__ = []
        for token in re.findall(self.pat, __lowercase ):
            lowercase__ = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" )
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(" " ) )
        return bpe_tokens

    def A__ ( self : List[Any], __lowercase : int ):
        # _convert_token_to_id: unknown tokens fall back to the unk token's id.
        return self.encoder.get(__lowercase, self.encoder.get(self.unk_token ) )

    def A__ ( self : Tuple, __lowercase : Optional[int] ):
        # _convert_id_to_token: inverse vocabulary lookup.
        return self.decoder.get(__lowercase )

    def A__ ( self : List[str], __lowercase : str ):
        # convert_tokens_to_string: undo the byte-level encoding back to text.
        lowercase__ = "".join(__lowercase )
        lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text

    def A__ ( self : Tuple, __lowercase : str, __lowercase : Optional[str] = None ):
        # save_vocabulary: write vocab.json and merges.txt into save_directory.
        if not os.path.isdir(__lowercase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowercase__ = os.path.join(
            __lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ = os.path.join(
            __lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )

        with open(__lowercase, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=__lowercase, ensure_ascii=__lowercase ) + "\n" )

        lowercase__ = 0
        with open(__lowercase, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # NOTE(review): the sort lambda takes `__lowercase` but reads undefined
            # `kv` — NameError at runtime; upstream sorts by merge rank (kv[1]).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda __lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    lowercase__ = token_index
                writer.write(" ".join(__lowercase ) + "\n" )
                index += 1

        return vocab_file, merge_file

    def A__ ( self : str, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        # build_inputs_with_special_tokens: <s> A </s> (</s> B </s> for pairs).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase__ = [self.cls_token_id]
        lowercase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def A__ ( self : List[Any], __lowercase : List[int], __lowercase : Optional[List[int]] = None, __lowercase : bool = False ):
        # get_special_tokens_mask: 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowercase, token_ids_a=__lowercase, already_has_special_tokens=__lowercase )

        if token_ids_a is None:
            return [1] + ([0] * len(__lowercase )) + [1]
        return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]

    def A__ ( self : int, __lowercase : List[int], __lowercase : Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: LED uses all-zero token type ids.
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ ( self : Optional[Any], __lowercase : str, __lowercase : int=False, **__lowercase : Optional[int] ):
        # prepare_for_tokenization: optionally prepend a space so the first word
        # is tokenized like a mid-sentence word.
        lowercase__ = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
            lowercase__ = " " + text
        return (text, kwargs)

    def A__ (
        self : str,
        __lowercase : Union[Dict[str, EncodedInput], BatchEncoding],
        __lowercase : Optional[int] = None,
        __lowercase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        __lowercase : Optional[int] = None,
        __lowercase : Optional[bool] = None,
    ):
        # _pad: standard padding, plus padding of `global_attention_mask` with -1
        # so it matches the padded input length.
        lowercase__ = super()._pad(
            encoded_inputs=__lowercase,
            max_length=__lowercase,
            padding_strategy=__lowercase,
            pad_to_multiple_of=__lowercase,
            return_attention_mask=__lowercase,
        )

        # Load from model defaults
        if return_attention_mask is None:
            lowercase__ = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase__ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase__ = len(encoded_inputs["global_attention_mask"] ) != len(__lowercase )

            if needs_to_be_padded:
                lowercase__ = len(__lowercase ) - len(encoded_inputs["global_attention_mask"] )

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase__ = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase__ = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )

        return encoded_inputs
37
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : Dict, __lowercase : int, __lowercase : Union[str, Any]=7, __lowercase : Union[str, Any]=3, __lowercase : Any=18, __lowercase : Union[str, Any]=30, __lowercase : Any=400, __lowercase : List[str]=True, __lowercase : Dict=None, __lowercase : List[str]=True, __lowercase : int=False, __lowercase : Union[str, Any]=True, __lowercase : str=True, __lowercase : Optional[int]=[0.5, 0.5, 0.5], __lowercase : List[Any]=[0.5, 0.5, 0.5], ): lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {"height": 18, "width": 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def A__ ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =DonutImageProcessor if is_vision_available() else None def A__ ( self : str ): lowercase__ = DonutImageProcessingTester(self ) @property def A__ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_resize" ) ) self.assertTrue(hasattr(__lowercase, "size" ) ) self.assertTrue(hasattr(__lowercase, "do_thumbnail" ) ) self.assertTrue(hasattr(__lowercase, "do_align_long_axis" ) ) self.assertTrue(hasattr(__lowercase, "do_pad" ) ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "image_mean" ) ) self.assertTrue(hasattr(__lowercase, "image_std" ) ) def A__ ( self : str ): lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {"height": 18, "width": 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 ) self.assertEqual(image_processor.size, {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84) ) self.assertEqual(image_processor.size, {"height": 84, "width": 42} ) def A__ ( self : List[str] ): pass @is_flaky() def A__ ( self : Dict ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Optional[Any] ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) @is_flaky() def A__ ( self : Tuple ): # Initialize image_processing lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched lowercase__ = image_processing(__lowercase, return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
37
1
def solution() -> int:
    """Project Euler 9: return a*b*c for the Pythagorean triplet with a + b + c == 1000.

    Fix: the inner range previously started at an undefined placeholder name;
    it must start at ``a`` so that b >= a and each triplet is enumerated once.
    The function is named ``solution`` because the demo below calls it by that
    name.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        # c = 1000 - a - b; keep only genuine Pythagorean triplets.
        if a * a + b * b == (1000 - a - b) ** 2
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
37
import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class _snake_case ( lowercase__): def A__ ( self : Optional[Any], __lowercase : str ): with open(__lowercase, encoding="utf-8" ) as input_file: lowercase__ = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) lowercase__ = input_file.read() lowercase__ = regexp.search(__lowercase ) return match def A__ ( self : str, __lowercase : str ): with open(__lowercase, encoding="utf-8" ) as input_file: lowercase__ = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL ) lowercase__ = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` lowercase__ = regexp.finditer(__lowercase ) lowercase__ = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def A__ ( self : Union[str, Any] ): lowercase__ = Path("./datasets" ) lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(__lowercase ) ): raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' ) def A__ ( self : Union[str, Any] ): lowercase__ = Path("./datasets" ) lowercase__ = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(__lowercase ) ): raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
37
1
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """CLI entry point for the TensorFlow benchmark.

    Fix: the parser was previously constructed around an undefined placeholder
    name; it must be given the ``TensorFlowBenchmarkArguments`` dataclass (the
    only argument type this script imports). The retry path translates the
    deprecated ``--no_*`` flags into an actionable error message.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): evaluates the trailing fragment of the parser's error
        # message (expected to be a list literal of flag strings) — confirm the
        # message shape before trusting this; `eval` on other input is unsafe.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
37
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule -> exported names. The final statement hands this mapping to
# _LazyModule, which previously referenced the (undefined) name
# ``_import_structure`` — the dict is restored under that name.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Without torch only the configuration objects are importable.
    pass
else:
    # Fix: this list was previously assigned to a bare module-level name,
    # clobbering the import map; it belongs under the "modeling_xmod" key.
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Fix: the lazy proxy was previously assigned to a throwaway name; it must
    # replace this module in sys.modules for lazy attribute access to work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
37
1
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 sub-string divisibility property of *num*.

    *num* is a permutation of the digits 0-9 as a tuple; the three-digit
    substrings d2d3d4 .. d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13, 17.
    """
    # d4 even <=> d2d3d4 divisible by 2
    if num[3] % 2 != 0:
        return False
    # digit-sum test <=> d3d4d5 divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d6 in {0, 5} <=> d4d5d6 divisible by 5
    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0..n-1 pandigital numbers with the sub-string divisibility property.

    Fixes: the digits were previously joined via an undefined placeholder where
    ``str``/``num`` belong, and both functions shared one name, so the
    references to ``is_substring_divisible`` (here) and ``solution`` (in the
    demo below) were unresolved.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
37
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase_ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class _snake_case ( unittest.TestCase): def __init__( self : List[Any], __lowercase : int, __lowercase : Optional[int]=7, __lowercase : List[str]=3, __lowercase : Tuple=18, __lowercase : List[Any]=30, __lowercase : Tuple=400, __lowercase : Any=None, __lowercase : Optional[int]=True, __lowercase : List[str]=True, __lowercase : Union[str, Any]=None, ): lowercase__ = size if size is not None else {"height": 20, "width": 20} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = size lowercase__ = do_normalize lowercase__ = do_convert_rgb lowercase__ = [512, 1024, 2048, 4096] lowercase__ = patch_size if patch_size is not None else {"height": 16, "width": 16} def A__ ( self : List[str] ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def A__ ( self : Any ): lowercase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" lowercase__ = Image.open(requests.get(__lowercase, stream=__lowercase ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Any =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Any ): 
lowercase__ = PixaStructImageProcessingTester(self ) @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Optional[Any] ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Optional[int] ): lowercase__ = self.image_processor_tester.prepare_dummy_image() lowercase__ = self.image_processing_class(**self.image_processor_dict ) lowercase__ = 2048 lowercase__ = image_processor(__lowercase, return_tensors="pt", max_patches=__lowercase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1e-3, rtol=1e-3 ) ) def A__ ( self : Union[str, Any] ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : int ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = 
prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 lowercase__ = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(__lowercase ): lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches lowercase__ = "Hello" lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase, header_text=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Tuple ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, np.ndarray ) lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase 
).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def A__ ( self : Any ): # Initialize image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase, torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, torch.Tensor ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Optional[int] =PixaStructImageProcessor if is_vision_available() else None def A__ ( self : Optional[int] ): lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 ) lowercase__ = 3 @property def A__ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self : Dict ): lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase, "do_normalize" ) ) self.assertTrue(hasattr(__lowercase, "do_convert_rgb" ) ) def A__ ( self : Union[str, Any] ): # Initialize 
image_processor lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase, Image.Image ) # Test not batched input lowercase__ = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowercase__ = image_processor( image_inputs[0], return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowercase__ = image_processor( __lowercase, return_tensors="pt", max_patches=__lowercase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
37
1
import math


def res(x: int, y: int) -> float:
    """Return a comparable magnitude for x**y, namely y * log10(x).

    Special cases: 0**y == 0 (including y == 0, since the x == 0 branch is
    checked first) and x**0 == 1 for non-zero x.

    Fixes: ``math.logaa`` does not exist (``math.log10`` is meant — the
    comment already says base 10); the interactive section calls ``res``,
    which previously did not exist; and the two input lines clobbered one
    shared name, leaving the four compared values undefined.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:
        return 0  # 0 raised to any number is 0
    if y == 0:
        return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two base/power pairs and report which power is larger.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))

    # We find the log of each number; comparing log-magnitudes avoids
    # computing the (potentially huge) powers themselves.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
37
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of *grid*.

    Moves are the four cardinal directions; cells holding 1 are walls, and
    *visit* tracks the current path so no cell is revisited (it is restored
    on backtrack, so callers can pass a shared empty set).

    Fix: all four parameters previously collapsed into one repeated
    placeholder name (a SyntaxError in Python), while the body already read
    ``grid`` and recursed via the name ``depth_first_search``.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or a wall: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ = random.Random() if is_torch_available(): import torch def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ): if rng is None: lowercase__ = global_rng lowercase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _snake_case ( unittest.TestCase): def __init__( self : Tuple, __lowercase : Union[str, Any], __lowercase : Dict=7, __lowercase : List[Any]=400, __lowercase : Tuple=2000, __lowercase : Dict=1, __lowercase : List[str]=0.0, __lowercase : Dict=1_6000, __lowercase : Union[str, Any]=True, __lowercase : Union[str, Any]=True, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = min_seq_length lowercase__ = max_seq_length lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase__ = feature_size lowercase__ = padding_value lowercase__ = sampling_rate lowercase__ = return_attention_mask lowercase__ = do_normalize def A__ ( self : Optional[int] ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def A__ ( self : Optional[Any], __lowercase : List[Any]=False, __lowercase : int=False ): def _flatten(__lowercase : str ): return list(itertools.chain(*__lowercase ) ) if equal_length: lowercase__ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowercase__ = [ _flatten(floats_list((x, self.feature_size) ) ) 
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff ) ] if numpify: lowercase__ = [np.asarray(__lowercase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : Tuple =ASTFeatureExtractor def A__ ( self : str ): lowercase__ = ASTFeatureExtractionTester(self ) def A__ ( self : str ): # Tests that all call wrap to encode_plus and batch_encode_plus lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )] lowercase__ = [np.asarray(__lowercase ) for speech_input in speech_inputs] # Test not batched input lowercase__ = feat_extract(speech_inputs[0], return_tensors="np" ).input_values lowercase__ = feat_extract(np_speech_inputs[0], return_tensors="np" ).input_values self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) ) # Test batched lowercase__ = feat_extract(__lowercase, padding=__lowercase, return_tensors="np" ).input_values lowercase__ = feat_extract(__lowercase, padding=__lowercase, return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(__lowercase, __lowercase ): self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase__ = np.asarray(__lowercase ) lowercase__ = feat_extract(__lowercase, return_tensors="np" ).input_values lowercase__ = feat_extract(__lowercase, return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(__lowercase, __lowercase ): self.assertTrue(np.allclose(__lowercase, __lowercase, atol=1e-3 ) ) @require_torch def A__ ( self : Dict ): import torch lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = np.random.rand(100 ).astype(np.floataa ) lowercase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase__ = feature_extractor.pad([{"input_values": inputs}], return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowercase__ = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def A__ ( self : List[str], __lowercase : List[Any] ): from datasets import load_dataset lowercase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" ) # automatic decoding with librispeech lowercase__ = ds.sort("id" ).select(range(__lowercase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def A__ ( self : str ): # fmt: off lowercase__ = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on lowercase__ = self._load_datasamples(1 ) lowercase__ = ASTFeatureExtractor() lowercase__ = feature_extractor(__lowercase, return_tensors="pt" ).input_values self.assertEquals(input_values.shape, (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30], __lowercase, atol=1e-4 ) )
37
def is_contains_unique_chars(input_str: str) -> bool:
    """Return True if every character in *input_str* occurs at most once.

    A big-integer bitmap indexed by code point gives O(n) time without any
    auxiliary set.

    Fixes: the three working values were previously all bound to one recycled
    local name while the logic read ``bitmap`` / ``ch_unicode`` /
    ``ch_bit_index_on``, none of which were ever defined; the bindings are
    restored.  The function also gets an importable descriptive name (the
    old leading-underscore placeholder was excluded from ``import *`` and
    referenced by nothing in this module).
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
1
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = TapasConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) # set absolute/relative position embeddings parameter lowercase__ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowercase__ = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) elif task == "WTQ": # run_task_main.py hparams lowercase__ = 4 lowercase__ = True # hparam_utils.py hparams lowercase__ = 0.66_4694 lowercase__ = 0.20_7951 lowercase__ = 0.12_1194 lowercase__ = True lowercase__ = True lowercase__ = False lowercase__ = 0.035_2513 lowercase__ = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowercase__ = 4 lowercase__ = False # hparam_utils.py hparams lowercase__ = 36.4519 lowercase__ = 0.90_3421 lowercase__ = 222.088 lowercase__ = True lowercase__ = True lowercase__ = True lowercase__ = 0.76_3141 lowercase__ = TapasForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) elif task == "TABFACT": lowercase__ = TapasForSequenceClassification(config=SCREAMING_SNAKE_CASE_ ) elif task == "MLM": lowercase__ = TapasForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) elif task == "INTERMEDIATE_PRETRAINING": lowercase__ = TapasModel(config=SCREAMING_SNAKE_CASE_ ) else: raise ValueError(f'''Task {task} not supported.''' ) print(f'''Building PyTorch model from configuration: {config}''' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model 
(weights and configuration) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Save tokenizer files print(f'''Save tokenizer files to {pytorch_dump_path}''' ) lowercase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
700
def exchange_sort(numbers: list) -> list:
    """Sort *numbers* in place with exchange sort and return the same list.

    O(n^2) comparisons; provided for instructional use.

    Fixes: the swap previously bound both values to a throwaway local name
    instead of writing them back into the list (so nothing was ever sorted),
    and the demo at the bottom calls ``exchange_sort``, which did not exist
    under the old placeholder name.
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
37
0