code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=30 , UpperCAmelCase_=4_00 , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_=[0.5, 0.5, 0.5] , UpperCAmelCase_=[0.5, 0.5, 0.5] , UpperCAmelCase_=True , UpperCAmelCase_=1 / 2_55 , UpperCAmelCase_=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std lowerCAmelCase = do_rescale lowerCAmelCase = rescale_factor lowerCAmelCase = do_pad def __snake_case ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ): if not batched: lowerCAmelCase = image_inputs[0] if isinstance(UpperCAmelCase_ , Image.Image ): lowerCAmelCase , lowerCAmelCase = image.size else: lowerCAmelCase , lowerCAmelCase = image.shape[1], image.shape[2] if w < h: lowerCAmelCase = int(self.size['''shortest_edge'''] * h / w ) lowerCAmelCase = self.size['''shortest_edge'''] elif w > h: lowerCAmelCase = self.size['''shortest_edge'''] lowerCAmelCase = int(self.size['''shortest_edge'''] * w / h ) else: lowerCAmelCase = self.size['''shortest_edge'''] lowerCAmelCase = self.size['''shortest_edge'''] else: lowerCAmelCase = [] for image in image_inputs: lowerCAmelCase , lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[0] )[0] lowerCAmelCase = max(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =ConditionalDetrImageProcessor if is_vision_available() else None def __snake_case ( self ): lowerCAmelCase = ConditionalDetrImageProcessingTester(self ) @property def __snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self ): lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) ) def __snake_case ( self ): lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) lowerCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , UpperCAmelCase_ ) def __snake_case ( self ): pass def __snake_case ( self ): # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __snake_case ( self ): # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __snake_case ( self ): # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __snake_case ( self ): # prepare image and target lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: lowerCAmelCase = json.loads(f.read() ) lowerCAmelCase = {'''image_id''': 3_97_69, '''annotations''': target} # encode them lowerCAmelCase = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) lowerCAmelCase = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors='''pt''' ) # verify pixel values lowerCAmelCase = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area lowerCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase_ ) ) # verify boxes lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id lowerCAmelCase = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase_ ) ) # verify is_crowd lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase_ ) ) # verify class_labels lowerCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase_ ) ) # verify orig_size lowerCAmelCase = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase_ ) ) # verify size lowerCAmelCase = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase_ ) ) @slow def __snake_case ( self ): # prepare image, target and masks_path lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: lowerCAmelCase = json.loads(f.read() ) lowerCAmelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} lowerCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowerCAmelCase = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) lowerCAmelCase = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors='''pt''' ) # verify pixel values lowerCAmelCase = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) # verify area lowerCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase_ ) ) # verify boxes lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase_ , atol=1E-3 ) ) # verify image_id lowerCAmelCase = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase_ ) ) # verify is_crowd lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase_ ) ) # verify class_labels lowerCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase_ ) ) # verify masks lowerCAmelCase = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase_ ) # verify orig_size lowerCAmelCase = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase_ ) ) # verify size lowerCAmelCase = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase_ ) )
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCAmelCase_ ="""src/transformers""" # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase_ =importlib.util.spec_from_file_location( """transformers""", os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) UpperCAmelCase_ =spec.loader.load_module() UpperCAmelCase_ =transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCAmelCase_ =re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") UpperCAmelCase_ ={ """CLIPConfigMixin""", """DecisionTransformerConfigMixin""", """EncoderDecoderConfigMixin""", """RagConfigMixin""", """SpeechEncoderDecoderConfigMixin""", """VisionEncoderDecoderConfigMixin""", """VisionTextDualEncoderConfigMixin""", } def UpperCAmelCase ( ): lowerCAmelCase = [] for config_class in list(CONFIG_MAPPING.values() ): lowerCAmelCase = False # source code of `config_class` lowerCAmelCase = inspect.getsource(_snake_case ) lowerCAmelCase = _re_checkpoint.findall(_snake_case ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` lowerCAmelCase , lowerCAmelCase = checkpoint # verify the checkpoint name corresponds to the checkpoint link lowerCAmelCase = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: lowerCAmelCase = True break lowerCAmelCase = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_snake_case ) if len(_snake_case ) > 0: lowerCAmelCase = '''\n'''.join(sorted(_snake_case ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
33
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""sew""" def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_=2 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_="group" , UpperCAmelCase_="gelu" , UpperCAmelCase_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCAmelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase_=False , UpperCAmelCase_=1_28 , UpperCAmelCase_=16 , UpperCAmelCase_=True , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="mean" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=2_56 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) lowerCAmelCase = hidden_size lowerCAmelCase = feat_extract_norm lowerCAmelCase = feat_extract_activation lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = conv_bias lowerCAmelCase = num_conv_pos_embeddings lowerCAmelCase = num_conv_pos_embedding_groups lowerCAmelCase = len(self.conv_dim ) lowerCAmelCase = num_hidden_layers lowerCAmelCase = intermediate_size lowerCAmelCase = squeeze_factor lowerCAmelCase = hidden_act lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = feat_proj_dropout lowerCAmelCase = final_dropout lowerCAmelCase = layerdrop lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range lowerCAmelCase = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase = apply_spec_augment lowerCAmelCase = mask_time_prob lowerCAmelCase = mask_time_length lowerCAmelCase = mask_time_min_masks lowerCAmelCase = mask_feature_prob lowerCAmelCase = mask_feature_length lowerCAmelCase = mask_feature_min_masks # ctc loss lowerCAmelCase = ctc_loss_reduction lowerCAmelCase = ctc_zero_infinity # sequence classification lowerCAmelCase = use_weighted_layer_sum lowerCAmelCase = classifier_proj_size @property def __snake_case ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
33
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ="""switch_transformers""" __a : Union[str, Any] =["""past_key_values"""] __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = d_kv lowerCAmelCase = d_ff lowerCAmelCase = num_sparse_encoder_layers lowerCAmelCase = num_layers lowerCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase = num_heads lowerCAmelCase = num_experts lowerCAmelCase = expert_capacity lowerCAmelCase = router_bias lowerCAmelCase = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) lowerCAmelCase = router_dtype lowerCAmelCase = router_ignore_padding_tokens lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = relative_attention_max_distance lowerCAmelCase = dropout_rate lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_factor lowerCAmelCase = feed_forward_proj lowerCAmelCase = use_cache lowerCAmelCase = add_router_probs lowerCAmelCase = router_z_loss_coef lowerCAmelCase = router_aux_loss_coef lowerCAmelCase = self.feed_forward_proj.split('''-''' ) lowerCAmelCase = act_info[-1] lowerCAmelCase = act_info[0] == '''gated''' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
1
from math import isqrt def UpperCAmelCase ( _snake_case ): return all(number % divisor != 0 for divisor in range(2 , isqrt(_snake_case ) + 1 ) ) def UpperCAmelCase ( _snake_case = 10**6 ): lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 7 while prime_candidate < max_prime: primes_count += is_prime(_snake_case ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F'''{solution() = }''')
33
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
1
# Imports import numpy as np class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ): self.set_matricies(red=UpperCAmelCase_ , green=UpperCAmelCase_ , blue=UpperCAmelCase_ , red_edge=UpperCAmelCase_ , nir=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ): if red is not None: lowerCAmelCase = red if green is not None: lowerCAmelCase = green if blue is not None: lowerCAmelCase = blue if red_edge is not None: lowerCAmelCase = red_edge if nir is not None: lowerCAmelCase = nir return True def __snake_case ( self , UpperCAmelCase_="" , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ): self.set_matricies(red=UpperCAmelCase_ , green=UpperCAmelCase_ , blue=UpperCAmelCase_ , red_edge=UpperCAmelCase_ , nir=UpperCAmelCase_ ) lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def __snake_case ( self ): return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def __snake_case ( self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def __snake_case ( self ): return self.nir * (self.red / (self.green**2)) def __snake_case ( self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def __snake_case ( self ): return (self.nir - self.red) / (self.nir + self.red) def __snake_case ( self ): return (self.nir - self.blue) / (self.nir + self.blue) def __snake_case ( self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def __snake_case ( self ): return (self.nir - self.green) / (self.nir + self.green) def __snake_case ( self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def __snake_case ( self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def __snake_case ( self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def __snake_case ( self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def __snake_case ( self , UpperCAmelCase_=0.08 , UpperCAmelCase_=1.22 , UpperCAmelCase_=0.03 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def __snake_case ( self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def __snake_case ( self ): return (self.nir / self.green) - 1 def __snake_case ( self ): return (self.nir / self.redEdge) - 1 def __snake_case ( self ): return (self.red - self.blue) / self.red def __snake_case ( self ): lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def __snake_case ( self ): return self.nir - self.green def __snake_case ( self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def __snake_case ( self ): lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def __snake_case ( self , UpperCAmelCase_=0.16 ): return (self.nir - self.green) / (self.nir + self.green + y) def __snake_case ( self , UpperCAmelCase_=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def __snake_case ( self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def __snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=None ): return (self.nir - b) / (a * self.red) def __snake_case ( self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def __snake_case ( self ): return (self.red + self.green + self.blue) / 30.5 def __snake_case ( self ): return self.nir / self.red def __snake_case ( self ): return (self.rvi() - 1) / (self.rvi() + 1) def __snake_case ( self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def __snake_case ( self ): return self.green / (self.nir + self.red + self.green) def __snake_case ( self ): return self.nir / (self.nir + self.red + self.green) def __snake_case ( self ): return self.red / (self.nir + self.red + self.green) def __snake_case ( self ): return (self.green - self.red) / (self.green + self.red) def __snake_case ( self ): return (self.red - self.green) / (self.red + self.green) def __snake_case ( self ): lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def __snake_case ( self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def __snake_case ( self ): return self.nir / self.red def __snake_case ( self ): return (self.ndvi() + 0.5) ** (1 / 2) def __snake_case ( self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
33
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Tuple =IFInpaintingSuperResolutionPipeline __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""} def __snake_case ( self ): return self._get_superresolution_dummy_components() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __snake_case ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __snake_case ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __snake_case ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __snake_case ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __snake_case ( self ): self._test_save_load_local() def __snake_case ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
33
1
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCAmelCase_ ="""<<<<<<< This should probably be modified because it mentions: """ UpperCAmelCase_ ="""======= >>>>>>> """ UpperCAmelCase_ =[ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCAmelCase_ =[ # (pattern, replacement) # Order is important here for some replacements (R"""tfds\.core""", R"""datasets"""), (R"""tf\.io\.gfile\.GFile""", R"""open"""), (R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""), (R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""), (R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""), (R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""), (R"""tfds\.features\.FeaturesDict\(""", R"""dict("""), (R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (R"""tfds\.""", R"""datasets."""), (R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""), (R"""self\.builder_config""", R"""self.config"""), ] def UpperCAmelCase ( _snake_case ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' @staticmethod def __snake_case ( UpperCAmelCase_ ): lowerCAmelCase = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=UpperCAmelCase_ ) def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ ): lowerCAmelCase = get_logger('''datasets-cli/converting''' ) lowerCAmelCase = tfds_path lowerCAmelCase = datasets_directory def __snake_case ( self ): if os.path.isdir(self._tfds_path ): lowerCAmelCase = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowerCAmelCase = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowerCAmelCase = os.path.abspath(self._datasets_directory ) self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" ) lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = {} if os.path.isdir(self._tfds_path ): lowerCAmelCase = os.listdir(UpperCAmelCase_ ) else: lowerCAmelCase = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"""Looking at file {f_name}""" ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if not os.path.isfile(UpperCAmelCase_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(UpperCAmelCase_ , encoding='''utf-8''' ) as f: lowerCAmelCase = f.readlines() lowerCAmelCase = [] lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = [] for line in lines: lowerCAmelCase = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowerCAmelCase = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowerCAmelCase = '''''' continue elif "from absl import logging" in out_line: lowerCAmelCase = '''from datasets import logging\n''' elif "getLogger" in out_line: lowerCAmelCase = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowerCAmelCase = True lowerCAmelCase = list(filter(lambda UpperCAmelCase_ : e in out_line , UpperCAmelCase_ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCAmelCase_ ) + '''\n''' ) out_lines.append(UpperCAmelCase_ ) out_lines.append(UpperCAmelCase_ ) continue else: for pattern, replacement in TO_CONVERT: lowerCAmelCase = re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowerCAmelCase = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , UpperCAmelCase_ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowerCAmelCase = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"""Error converting {out_line.strip()}""" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowerCAmelCase = True out_lines.append(UpperCAmelCase_ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowerCAmelCase = f_name.replace('''.py''' , '''''' ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) self._logger.info(F"""Adding directory {output_dir}""" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(UpperCAmelCase_ ) if needs_manual_update: with_manual_update.append(UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.writelines(UpperCAmelCase_ ) self._logger.info(F"""Converted in {output_file}""" ) for utils_file in utils_files: try: lowerCAmelCase = os.path.basename(UpperCAmelCase_ ) lowerCAmelCase = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"""Moving {dest_folder} to {utils_file}""" ) shutil.copy(UpperCAmelCase_ , UpperCAmelCase_ ) except KeyError: self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
33
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ ={ """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = len(_snake_case ) lowerCAmelCase = [[0] * n for i in range(_snake_case )] for i in range(_snake_case ): lowerCAmelCase = y_points[i] for i in range(2 , _snake_case ): for j in range(_snake_case , _snake_case ): lowerCAmelCase = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
33
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): '''simple docstring''' __a : Optional[datasets.Features] =None __a : str ="utf-8" __a : Optional[str] =None __a : Optional[str] =None __a : bool =True # deprecated __a : Optional[int] =None # deprecated __a : int =1_0 << 2_0 # 10MB __a : Optional[bool] =None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __a : str =JsonConfig def __snake_case ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) lowerCAmelCase = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self , UpperCAmelCase_ ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase_ , (str, list, tuple) ): lowerCAmelCase = data_files if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self , UpperCAmelCase_ ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema ) return pa_table def __snake_case ( self , UpperCAmelCase_ ): for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) # We keep only the field we are interested in lowerCAmelCase = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase_ , (list, tuple) ): lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} else: lowerCAmelCase = dataset lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) yield file_idx, self._cast_table(UpperCAmelCase_ ) # If the file has one json object per line else: with open(UpperCAmelCase_ , '''rb''' ) as f: lowerCAmelCase = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 ) lowerCAmelCase = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: lowerCAmelCase = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase_ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' ) try: while True: try: lowerCAmelCase = paj.read_json( io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase_ , pa.ArrowInvalid ) and "straddling" not in str(UpperCAmelCase_ ) or block_size > len(UpperCAmelCase_ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON try: lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(UpperCAmelCase_ ) break else: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ ) batch_idx += 1
33
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase_ ={ """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } UpperCAmelCase_ ={ """google/realm-cc-news-pretrained-embedder""": 512, """google/realm-cc-news-pretrained-encoder""": 512, """google/realm-cc-news-pretrained-scorer""": 512, """google/realm-cc-news-pretrained-openqa""": 512, """google/realm-orqa-nq-openqa""": 512, """google/realm-orqa-nq-reader""": 512, """google/realm-orqa-wq-openqa""": 512, """google/realm-orqa-wq-reader""": 512, } UpperCAmelCase_ ={ """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : int =PRETRAINED_VOCAB_FILES_MAP __a : Any =PRETRAINED_INIT_CONFIGURATION __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] =RealmTokenizer def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_="[UNK]" , UpperCAmelCase_="[SEP]" , UpperCAmelCase_="[PAD]" , UpperCAmelCase_="[CLS]" , UpperCAmelCase_="[MASK]" , UpperCAmelCase_=True , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__( UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenize_chinese_chars=UpperCAmelCase_ , strip_accents=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCAmelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCAmelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCAmelCase_ ) != tokenize_chinese_chars ): lowerCAmelCase = getattr(UpperCAmelCase_ , normalizer_state.pop('''type''' ) ) lowerCAmelCase = do_lower_case lowerCAmelCase = strip_accents lowerCAmelCase = tokenize_chinese_chars lowerCAmelCase = normalizer_class(**UpperCAmelCase_ ) lowerCAmelCase = do_lower_case def __snake_case ( self , UpperCAmelCase_ , **UpperCAmelCase_ ): lowerCAmelCase = PaddingStrategy.MAX_LENGTH lowerCAmelCase = text lowerCAmelCase = kwargs.pop('''text_pair''' , UpperCAmelCase_ ) lowerCAmelCase = kwargs.pop('''return_tensors''' , UpperCAmelCase_ ) lowerCAmelCase = { '''input_ids''': [], '''attention_mask''': [], '''token_type_ids''': [], } for idx, candidate_text in enumerate(UpperCAmelCase_ ): if batch_text_pair is not None: lowerCAmelCase = batch_text_pair[idx] else: lowerCAmelCase = None lowerCAmelCase = super().__call__(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCAmelCase = encoded_candidates.get('''input_ids''' ) lowerCAmelCase = encoded_candidates.get('''attention_mask''' ) lowerCAmelCase = encoded_candidates.get('''token_type_ids''' ) if encoded_input_ids is not None: output_data["input_ids"].append(UpperCAmelCase_ ) if encoded_attention_mask is not None: output_data["attention_mask"].append(UpperCAmelCase_ ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(UpperCAmelCase_ ) lowerCAmelCase = {key: item for key, item in output_data.items() if len(UpperCAmelCase_ ) != 0} return BatchEncoding(UpperCAmelCase_ , tensor_type=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None ): lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ ) return tuple(UpperCAmelCase_ )
33
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""maskformer-swin""" __a : Optional[int] ={ """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ ) lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = len(UpperCAmelCase_ ) lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) ) lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )] lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
1
import argparse import os import re UpperCAmelCase_ ="""src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict UpperCAmelCase_ =re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings UpperCAmelCase_ =re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""") def UpperCAmelCase ( _snake_case , _snake_case = False ): with open(_snake_case , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase = f.read() lowerCAmelCase = content.split('''\n''' ) lowerCAmelCase = [] lowerCAmelCase = 0 while line_idx < len(_snake_case ): if _re_intro_mapping.search(lines[line_idx] ) is not None: lowerCAmelCase = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(''' ''' * indent + '''(''' ): new_lines.append(lines[line_idx] ) line_idx += 1 lowerCAmelCase = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": lowerCAmelCase = line_idx while not lines[line_idx].startswith(''' ''' * indent + ''')''' ): line_idx += 1 blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers lowerCAmelCase = sorted(_snake_case , key=lambda _snake_case : _re_identifier.search(_snake_case ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write('''\n'''.join(_snake_case ) ) elif "\n".join(_snake_case ) != content: return True def UpperCAmelCase ( _snake_case = False ): lowerCAmelCase = [os.path.join(_snake_case , _snake_case ) for f in os.listdir(_snake_case ) if f.endswith('''.py''' )] lowerCAmelCase = [sort_auto_mapping(_snake_case , overwrite=_snake_case ) for fname in fnames] if not overwrite and any(_snake_case ): lowerCAmelCase = [f for f, d in zip(_snake_case , _snake_case ) if d] raise ValueError( F"""The following files have auto mappings that need sorting: {", ".join(_snake_case )}. Run `make style` to fix""" ''' this.''' ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") UpperCAmelCase_ =parser.parse_args() sort_all_auto_mappings(not args.check_only)
33
from collections.abc import Sequence def UpperCAmelCase ( _snake_case , _snake_case = False ): if not arr: return 0 lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' ) lowerCAmelCase = 0.0 for num in arr: lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num ) lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
33
1
from __future__ import annotations import math from collections.abc import Callable def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case = 100 , ): lowerCAmelCase = x_start lowerCAmelCase = fnc(_snake_case ) lowerCAmelCase = 0.0 for _ in range(_snake_case ): # Approximates curve as a sequence of linear lines and sums their length lowerCAmelCase = (x_end - x_start) / steps + xa lowerCAmelCase = fnc(_snake_case ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowerCAmelCase = xa lowerCAmelCase = fxa return length if __name__ == "__main__": def UpperCAmelCase ( _snake_case ): return math.sin(10 * x ) print("""f(x) = sin(10 * x)""") print("""The length of the curve from x = -10 to x = 10 is:""") UpperCAmelCase_ =10 while i <= 10_0000: print(F'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
33
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
1
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer UpperCAmelCase_ ="""bart""" UpperCAmelCase_ =True @st.cache(allow_output_mutation=_snake_case ) def UpperCAmelCase ( ): if LOAD_DENSE_INDEX: lowerCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) lowerCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) lowerCAmelCase = qar_model.eval() else: lowerCAmelCase , lowerCAmelCase = (None, None) if MODEL_TYPE == "bart": lowerCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) lowerCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) lowerCAmelCase = sas_model.eval() else: lowerCAmelCase , lowerCAmelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_snake_case ) def UpperCAmelCase ( ): if LOAD_DENSE_INDEX: lowerCAmelCase = faiss.StandardGpuResources() lowerCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] lowerCAmelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) lowerCAmelCase = faiss.IndexFlatIP(128 ) lowerCAmelCase = faiss.index_cpu_to_gpu(_snake_case , 1 , _snake_case ) wikiaab_gpu_index_flat.add(_snake_case ) # TODO fix for larger GPU else: lowerCAmelCase , lowerCAmelCase = (None, None) lowerCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_snake_case ) def UpperCAmelCase ( ): lowerCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) lowerCAmelCase = elia['''train_eli5'''] lowerCAmelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) lowerCAmelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_snake_case ) return (elia_train, eli5_train_q_index) UpperCAmelCase_,UpperCAmelCase_,UpperCAmelCase_ =load_indexes() UpperCAmelCase_,UpperCAmelCase_,UpperCAmelCase_,UpperCAmelCase_ =load_models() UpperCAmelCase_,UpperCAmelCase_ =load_train_data() def UpperCAmelCase ( _snake_case , _snake_case=10 ): lowerCAmelCase = embed_questions_for_retrieval([question] , _snake_case , _snake_case ) lowerCAmelCase , lowerCAmelCase = eli5_train_q_index.search(_snake_case , _snake_case ) lowerCAmelCase = [elia_train[int(_snake_case )] for i in I[0]] return nn_examples def UpperCAmelCase ( _snake_case , _snake_case="wiki40b" , _snake_case="dense" , _snake_case=10 ): if source == "none": lowerCAmelCase , lowerCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": lowerCAmelCase , lowerCAmelCase = query_qa_dense_index( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) else: lowerCAmelCase , lowerCAmelCase = query_es_index( _snake_case , _snake_case , index_name='''english_wiki40b_snippets_100w''' , n_results=_snake_case , ) lowerCAmelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] lowerCAmelCase = '''question: {} context: {}'''.format(_snake_case , _snake_case ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _snake_case : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _snake_case : None), } ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case=64 , _snake_case=256 , _snake_case=False , _snake_case=2 , _snake_case=0.95 , _snake_case=0.8 ): with torch.no_grad(): lowerCAmelCase = qa_sas_generate( _snake_case , _snake_case , _snake_case , num_answers=1 , num_beams=_snake_case , min_len=_snake_case , max_len=_snake_case , do_sample=_snake_case , temp=_snake_case , top_p=_snake_case , top_k=_snake_case , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar UpperCAmelCase_ ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" UpperCAmelCase_ =""" <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia UpperCAmelCase_ =""" This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) UpperCAmelCase_ =[ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] UpperCAmelCase_ =st.sidebar.checkbox("""Demo options""") if demo_options: UpperCAmelCase_ =st.sidebar.selectbox( """""", action_list, index=3, ) UpperCAmelCase_ =action_list.index(action_st) UpperCAmelCase_ =st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) UpperCAmelCase_ =show_type == """Show full text of passages""" else: UpperCAmelCase_ =3 UpperCAmelCase_ =True UpperCAmelCase_ =st.sidebar.checkbox("""Retrieval options""") if retrieval_options: UpperCAmelCase_ =""" ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) UpperCAmelCase_ =st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) UpperCAmelCase_ =st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: UpperCAmelCase_ ="""wiki40b""" UpperCAmelCase_ ="""dense""" UpperCAmelCase_ ="""beam""" UpperCAmelCase_ =2 UpperCAmelCase_ =64 UpperCAmelCase_ =256 UpperCAmelCase_ =None UpperCAmelCase_ =None UpperCAmelCase_ =st.sidebar.checkbox("""Generation options""") if generate_options: UpperCAmelCase_ =""" ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. """ st.sidebar.markdown(generate_info) UpperCAmelCase_ =st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) UpperCAmelCase_ =st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) UpperCAmelCase_ =st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": UpperCAmelCase_ =st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: UpperCAmelCase_ =st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) UpperCAmelCase_ =st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) UpperCAmelCase_ =None # start main text UpperCAmelCase_ =[ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] UpperCAmelCase_ =st.selectbox( """What would you like to ask? ---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": UpperCAmelCase_ =st.text_input("""Enter your question here:""", """""") else: UpperCAmelCase_ =question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": UpperCAmelCase_,UpperCAmelCase_ =make_support(question, source=wiki_source, method="""dense""", n_results=10) UpperCAmelCase_,UpperCAmelCase_ =make_support(question, source=wiki_source, method="""sparse""", n_results=10) UpperCAmelCase_ =[] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] UpperCAmelCase_ =support_list[:10] UpperCAmelCase_ ="""<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: UpperCAmelCase_,UpperCAmelCase_ =make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: UpperCAmelCase_,UpperCAmelCase_ =answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): UpperCAmelCase_ ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) UpperCAmelCase_ =res[1].strip() if sec_titles == "": UpperCAmelCase_ ="""[{}]({})""".format(res[0], wiki_url) else: UpperCAmelCase_ =sec_titles.split(""" & """) UpperCAmelCase_ =""" & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: UpperCAmelCase_ =find_nearest_training(question) UpperCAmelCase_ =nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) UpperCAmelCase_ =[ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) UpperCAmelCase_ =""" --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
33
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ """configuration_x_clip""": [ """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XCLIPConfig""", """XCLIPTextConfig""", """XCLIPVisionConfig""", ], """processing_x_clip""": ["""XCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """XCLIPModel""", """XCLIPPreTrainedModel""", """XCLIPTextModel""", """XCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
1
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( _snake_case = 3 ): if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' ) lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' ) lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case ) lowerCAmelCase = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots lowerCAmelCase = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
33
1
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Any =1 @register_to_config def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ): lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowerCAmelCase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) lowerCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): lowerCAmelCase = std.unsqueeze(-1 ) lowerCAmelCase = -score / std # compute lowerCAmelCase = -1.0 / len(self.timesteps ) lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowerCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): lowerCAmelCase = beta_t.unsqueeze(-1 ) lowerCAmelCase = -0.5 * beta_t * x lowerCAmelCase = torch.sqrt(UpperCAmelCase_ ) lowerCAmelCase = drift - diffusion**2 * score lowerCAmelCase = x + drift * dt # add noise lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype ) lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ): return self.config.num_train_timesteps
33
1
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ =logging.get_logger() @dataclass class __UpperCamelCase : '''simple docstring''' __a : nn.Module __a : List[nn.Module] =field(default_factory=__UpperCAmelCase ) __a : list =field(default_factory=__UpperCAmelCase ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase_ , nn.Convad ) or isinstance(UpperCAmelCase_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(UpperCAmelCase_ ) def __call__( self , UpperCAmelCase_ ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(UpperCAmelCase_ ) [x.remove() for x in self.handles] return self @property def __snake_case ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda UpperCAmelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __UpperCamelCase : '''simple docstring''' __a : nn.Module __a : nn.Module __a : int =0 __a : List =field(default_factory=__UpperCAmelCase ) __a : List =field(default_factory=__UpperCAmelCase ) def __call__( self , UpperCAmelCase_ ): lowerCAmelCase = Tracker(self.dest )(UpperCAmelCase_ ).parametrized lowerCAmelCase = Tracker(self.src )(UpperCAmelCase_ ).parametrized lowerCAmelCase = list(filter(lambda UpperCAmelCase_ : type(UpperCAmelCase_ ) not in self.src_skip , UpperCAmelCase_ ) ) lowerCAmelCase = list(filter(lambda UpperCAmelCase_ : type(UpperCAmelCase_ ) not in self.dest_skip , UpperCAmelCase_ ) ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise Exception( F"""Numbers of operations are different. Source module has {len(UpperCAmelCase_ )} operations while""" F""" destination module has {len(UpperCAmelCase_ )}.""" ) for dest_m, src_m in zip(UpperCAmelCase_ , UpperCAmelCase_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case = True ): print(F"""Converting {name}...""" ) with torch.no_grad(): lowerCAmelCase = timm.create_model(_snake_case , pretrained=_snake_case ).eval() lowerCAmelCase = ResNetForImageClassification(_snake_case ).eval() lowerCAmelCase = ModuleTransfer(src=_snake_case , dest=_snake_case ) lowerCAmelCase = torch.randn((1, 3, 224, 224) ) module_transfer(_snake_case ) assert torch.allclose(from_model(_snake_case ) , our_model(_snake_case ).logits ), "The model logits don't match the original one." lowerCAmelCase = F"""resnet{"-".join(name.split("resnet" ) )}""" print(_snake_case ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=_snake_case , ) # we can use the convnext one lowerCAmelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=_snake_case , ) print(F"""Pushed {checkpoint_name}""" ) def UpperCAmelCase ( _snake_case , _snake_case = None , _snake_case = True ): lowerCAmelCase = '''imagenet-1k-id2label.json''' lowerCAmelCase = 1000 lowerCAmelCase = (1, num_labels) lowerCAmelCase = '''huggingface/label-files''' lowerCAmelCase = num_labels lowerCAmelCase = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) lowerCAmelCase = {int(_snake_case ): v for k, v in idalabel.items()} lowerCAmelCase = idalabel lowerCAmelCase = {v: k for k, v in idalabel.items()} lowerCAmelCase = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case ) lowerCAmelCase = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(_snake_case , names_to_config[model_name] , _snake_case , _snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_snake_case , _snake_case , _snake_case , _snake_case ) return config, expected_shape if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) UpperCAmelCase_ =parser.parse_args() UpperCAmelCase_ =args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
33
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __UpperCamelCase ( yaml.SafeLoader ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys] lowerCAmelCase = Counter(UpperCAmelCase_ ) lowerCAmelCase = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ): lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ ) self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ ) return mapping def UpperCAmelCase ( _snake_case ): lowerCAmelCase = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase = full_content[1:].index('''---''' ) + 1 lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_snake_case ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __snake_case ( cls , UpperCAmelCase_ ): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase_ ) else: return cls() def __snake_case ( self , UpperCAmelCase_ ): if path.exists(): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase = readme_file.read() else: lowerCAmelCase = None lowerCAmelCase = self._to_readme(UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ = None ): if readme_content is not None: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ ) lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def __snake_case ( cls , UpperCAmelCase_ ): lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase_ ) def __snake_case ( self ): return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' ) UpperCAmelCase_ ={ """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") UpperCAmelCase_ =ap.parse_args() UpperCAmelCase_ =Path(args.readme_filepath) UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
33
1
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCAmelCase_ =abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def UpperCAmelCase ( _snake_case ): config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def UpperCAmelCase ( _snake_case ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_snake_case ) def UpperCAmelCase ( _snake_case ): from transformers.testing_utils import pytest_terminal_summary_main lowerCAmelCase = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(_snake_case , id=_snake_case ) def UpperCAmelCase ( _snake_case , _snake_case ): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: lowerCAmelCase = 0 # Doctest custom flag to ignore output. UpperCAmelCase_ =doctest.register_optionflag("""IGNORE_RESULT""") UpperCAmelCase_ =doctest.OutputChecker class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) UpperCAmelCase_ =CustomOutputChecker UpperCAmelCase_ =HfDoctestModule UpperCAmelCase_ =HfDocTestParser
33
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 ) lowerCAmelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): for example in examples: lowerCAmelCase = video_classifier(UpperCAmelCase_ ) self.assertEqual( UpperCAmelCase_ , [ {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, ] , ) @require_torch def __snake_case ( self ): lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowerCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowerCAmelCase = pipeline( '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 ) lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) lowerCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __snake_case ( self ): pass
33
1
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase_ ="""▁""" UpperCAmelCase_ =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BigBirdTokenizer __a : Any =BigBirdTokenizerFast __a : str =True __a : Optional[Any] =True def __snake_case ( self ): super().setUp() lowerCAmelCase = self.tokenizer_class(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = '''<s>''' lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''[MASK]''' ) self.assertEqual(len(UpperCAmelCase_ ) , 10_04 ) def __snake_case ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __snake_case ( self ): if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = '''I was born in 92000, and this is falsé.''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) lowerCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ ) lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = BigBirdTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , ) lowerCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ ) self.assertListEqual( UpperCAmelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __snake_case ( self ): return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) @slow def __snake_case ( self ): lowerCAmelCase = '''Hello World!''' lowerCAmelCase = [65, 1_85_36, 22_60, 1_01, 66] self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) ) @slow def __snake_case ( self ): lowerCAmelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off lowerCAmelCase = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) ) @require_torch @slow def __snake_case ( self ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ) lowerCAmelCase = self.big_tokenizer.encode_plus(UpperCAmelCase_ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase_ ) lowerCAmelCase = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase_ ) lowerCAmelCase = BigBirdConfig(attention_type='''original_full''' ) lowerCAmelCase = BigBirdModel(UpperCAmelCase_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase_ ) model(**UpperCAmelCase_ ) @slow def __snake_case ( self ): lowerCAmelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' ) lowerCAmelCase = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids ) self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' ) @slow def __snake_case ( self ): # fmt: off lowerCAmelCase = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
33
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def __snake_case ( self , UpperCAmelCase_=0 ): lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) ) lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) # warmup pass to apply optimizations lowerCAmelCase = pipe(**self.get_dummy_inputs() ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def __snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __snake_case ( self ): lowerCAmelCase = ort.SessionOptions() lowerCAmelCase = False return options def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""", """umberto-commoncrawl-cased-v1""": ( """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json""" ), """umberto-wikipedia-uncased-v1""": ( """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json""" ), } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : int ="""camembert""" def __init__( self , UpperCAmelCase_=3_05_22 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=5_12 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=1 , UpperCAmelCase_=0 , UpperCAmelCase_=2 , UpperCAmelCase_="absolute" , UpperCAmelCase_=True , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = classifier_dropout class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' @property def __snake_case ( self ): if self.task == "multiple-choice": lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
33
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( _snake_case ): lowerCAmelCase = args.pruning_method lowerCAmelCase = args.threshold lowerCAmelCase = args.model_name_or_path.rstrip('''/''' ) lowerCAmelCase = args.target_model_path print(F"""Load fine-pruned model from {model_name_or_path}""" ) lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) ) lowerCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "bias" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase , lowerCAmelCase = -0.1, 1.1 lowerCAmelCase = torch.sigmoid(_snake_case ) lowerCAmelCase = s * (r - l) + l lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: lowerCAmelCase = os.path.join( os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" ) if not os.path.isdir(_snake_case ): shutil.copytree(_snake_case , _snake_case ) print(F"""\nCreated folder {target_model_path}""" ) torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) UpperCAmelCase_ =parser.parse_args() main(args)
33
1
def UpperCAmelCase ( _snake_case , _snake_case ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) lowerCAmelCase = str(bin(_snake_case ) )[2:] # remove the leading "0b" lowerCAmelCase = str(bin(_snake_case ) )[2:] # remove the leading "0b" lowerCAmelCase = max(len(_snake_case ) , len(_snake_case ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(_snake_case ) , b_binary.zfill(_snake_case ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
33
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } UpperCAmelCase_ ={ """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } UpperCAmelCase_ ={ """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def UpperCAmelCase ( _snake_case ): lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char lowerCAmelCase = set(_snake_case ) return pairs class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ): super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = merges_file lowerCAmelCase = {} lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 self.add_from_file(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self ): return len(self.encoder ) def __snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.cache: return self.cache[token] lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 while i < len(UpperCAmelCase_ ): try: lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = new_word if len(UpperCAmelCase_ ) == 1: break else: lowerCAmelCase = get_pairs(UpperCAmelCase_ ) lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ ) lowerCAmelCase = word[:-4] lowerCAmelCase = word return word def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [] lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) ) return split_tokens def __snake_case ( self , UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.merges_file , UpperCAmelCase_ ) return out_vocab_file, out_merge_file def __snake_case ( self , UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): try: with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" ) return lowerCAmelCase = f.readlines() for lineTmp in lines: lowerCAmelCase = lineTmp.strip() lowerCAmelCase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase = line[:idx] lowerCAmelCase = len(self.encoder )
33
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""", } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] ="""bloom""" __a : Any =["""past_key_values"""] __a : str ={ """num_hidden_layers""": """n_layer""", """num_attention_heads""": """n_head""", } def __init__( self , UpperCAmelCase_=25_08_80 , UpperCAmelCase_=64 , UpperCAmelCase_=2 , UpperCAmelCase_=8 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=0.02 , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=1 , UpperCAmelCase_=False , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size # Backward compatibility with n_embed kwarg lowerCAmelCase = kwargs.pop('''n_embed''' , UpperCAmelCase_ ) lowerCAmelCase = hidden_size if n_embed is None else n_embed lowerCAmelCase = n_layer lowerCAmelCase = n_head lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_range lowerCAmelCase = use_cache lowerCAmelCase = pretraining_tp lowerCAmelCase = apply_residual_connection_post_layernorm lowerCAmelCase = hidden_dropout lowerCAmelCase = attention_dropout lowerCAmelCase = bos_token_id lowerCAmelCase = eos_token_id lowerCAmelCase = slow_but_exact super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : List[Any] =version.parse("""1.12""" ) def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = "default" , UpperCAmelCase_ = None , UpperCAmelCase_ = False , ): super().__init__(UpperCAmelCase_ , task=UpperCAmelCase_ , patching_specs=UpperCAmelCase_ , use_past=UpperCAmelCase_ ) if not getattr(self._config , '''pad_token_id''' , UpperCAmelCase_ ): # TODO: how to do that better? lowerCAmelCase = 0 @property def __snake_case ( self ): lowerCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(UpperCAmelCase_ , direction='''inputs''' , inverted_values_shape=UpperCAmelCase_ ) lowerCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __snake_case ( self ): return self._config.n_layer @property def __snake_case ( self ): return self._config.n_head @property def __snake_case ( self ): return 1E-3 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ): lowerCAmelCase = super(UpperCAmelCase_ , self ).generate_dummy_inputs( UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ ) # We need to order the input in the way they appears in the forward() lowerCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase , lowerCAmelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase = seqlen + 2 lowerCAmelCase = self._config.hidden_size // self.num_attention_heads lowerCAmelCase = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowerCAmelCase = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowerCAmelCase = [ (torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(self.num_layers ) ] lowerCAmelCase = common_inputs['''attention_mask'''] if self.use_past: lowerCAmelCase = ordered_inputs['''attention_mask'''].dtype lowerCAmelCase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_ )] , dim=1 ) return ordered_inputs @property def __snake_case ( self ): return 13
33
from __future__ import annotations from typing import Generic, TypeVar UpperCAmelCase_ =TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = self lowerCAmelCase = 0 class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # map from node name to the node object lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # create a new set with x as its member lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): # find the set x belongs to (with path-compression) lowerCAmelCase = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase = nodea else: lowerCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # merge 2 disjoint sets self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) ) class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): # add an edge with the given weight self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase = weight lowerCAmelCase = weight def __snake_case ( self ): lowerCAmelCase = [] lowerCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda UpperCAmelCase_ : x[2] ) # creating the disjoint set lowerCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(UpperCAmelCase_ ) # MST generation lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index] index += 1 lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ ) return graph
33
1
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=99 , UpperCAmelCase_=64 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=64 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=5_12 , UpperCAmelCase_=16 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=3 , UpperCAmelCase_=4 , UpperCAmelCase_=None , ): lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def __snake_case ( self ): return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def __snake_case ( self ): lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __snake_case ( self ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = MPNetModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = MPNetForQuestionAnswering(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = self.num_labels lowerCAmelCase = MPNetForSequenceClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = self.num_choices lowerCAmelCase = MPNetForMultipleChoice(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = self.num_labels lowerCAmelCase = MPNetForTokenClassification(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case ( self ): lowerCAmelCase = self.prepare_config_and_inputs() ((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = config_and_inputs lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : int =( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) __a : Any =( { """feature-extraction""": MPNetModel, """fill-mask""": MPNetForMaskedLM, """question-answering""": MPNetForQuestionAnswering, """text-classification""": MPNetForSequenceClassification, """token-classification""": MPNetForTokenClassification, """zero-shot""": MPNetForSequenceClassification, } if is_torch_available() else {} ) __a : int =False __a : List[str] =True def __snake_case ( self ): lowerCAmelCase = MPNetModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def __snake_case ( self ): self.config_tester.run_common_tests() def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_ ) @require_torch class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self ): lowerCAmelCase = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) lowerCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) lowerCAmelCase = model(UpperCAmelCase_ )[0] lowerCAmelCase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , UpperCAmelCase_ ) lowerCAmelCase = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
33
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations(_snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(_snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations_with_dp_array( _snake_case , _snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , _snake_case ) for item in array ) lowerCAmelCase = answer return answer lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(_snake_case , _snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * (target + 1) lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(_snake_case ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ =3 UpperCAmelCase_ =5 UpperCAmelCase_ =[1, 2, 5] print(combination_sum_iv(n, array, target))
33
1
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
import torch from diffusers import StableDiffusionPipeline UpperCAmelCase_ ="""path-to-your-trained-model""" UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") UpperCAmelCase_ ="""A photo of sks dog in a bucket""" UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
33
1
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def UpperCAmelCase ( _snake_case ): lowerCAmelCase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) lowerCAmelCase = emb.weight.data return lin_layer def UpperCAmelCase ( _snake_case ): lowerCAmelCase = torch.load(_snake_case , map_location='''cpu''' ) lowerCAmelCase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] lowerCAmelCase = mam_aaa['''model'''] remove_ignore_keys_(_snake_case ) lowerCAmelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowerCAmelCase = MaMaaaConfig( vocab_size=_snake_case , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) lowerCAmelCase = state_dict['''decoder.embed_tokens.weight'''] lowerCAmelCase = MaMaaaForConditionalGeneration(_snake_case ) model.model.load_state_dict(_snake_case , strict=_snake_case ) lowerCAmelCase = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCAmelCase_ =parser.parse_args() UpperCAmelCase_ =convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß) model.save_pretrained(args.pytorch_dump_folder_path)
33
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() UpperCAmelCase_ =2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model UpperCAmelCase_ ={ # fairseq: """wmt19-ru-en""": {"""length_penalty""": 1.1}, """wmt19-en-ru""": {"""length_penalty""": 1.15}, """wmt19-en-de""": {"""length_penalty""": 1.0}, """wmt19-de-en""": {"""length_penalty""": 1.1}, # allenai: """wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6}, """wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6}, """wmt16-en-de-12-1""": {"""length_penalty""": 0.8}, """wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6}, """wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6}, } # this remaps the different models to their organization names UpperCAmelCase_ ={} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: UpperCAmelCase_ ="""facebook""" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: UpperCAmelCase_ ="""allenai""" def UpperCAmelCase ( _snake_case ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowerCAmelCase = dict((re.sub(R'''@@$''' , '''''' , _snake_case ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , _snake_case ), v) for k, v in d.items() ) lowerCAmelCase = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] lowerCAmelCase = d[k] # restore return da def UpperCAmelCase ( _snake_case , _snake_case ): # prep assert os.path.exists(_snake_case ) os.makedirs(_snake_case , exist_ok=_snake_case ) print(F"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models lowerCAmelCase = basename(_snake_case ) lowerCAmelCase = dirname(_snake_case ) lowerCAmelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel lowerCAmelCase = cls.hub_models() lowerCAmelCase = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''} lowerCAmelCase = '''.''' # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(F"""using checkpoint {checkpoint_file}""" ) lowerCAmelCase = hub_utils.from_pretrained( _snake_case , _snake_case , _snake_case , archive_map=_snake_case , **_snake_case ) lowerCAmelCase = vars(chkpt['''args''']['''model'''] ) lowerCAmelCase = args['''source_lang'''] lowerCAmelCase = args['''target_lang'''] lowerCAmelCase = dirname(_snake_case ) lowerCAmelCase = basename(_snake_case ) # dicts lowerCAmelCase = os.path.join(_snake_case , F"""dict.{src_lang}.txt""" ) lowerCAmelCase = os.path.join(_snake_case , F"""dict.{tgt_lang}.txt""" ) lowerCAmelCase = Dictionary.load(_snake_case ) lowerCAmelCase = rewrite_dict_keys(src_dict.indices ) lowerCAmelCase = len(_snake_case ) lowerCAmelCase = os.path.join(_snake_case , '''vocab-src.json''' ) print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab lowerCAmelCase = True for k in src_vocab.keys(): if not k.islower(): lowerCAmelCase = False break lowerCAmelCase = Dictionary.load(_snake_case ) lowerCAmelCase = rewrite_dict_keys(tgt_dict.indices ) lowerCAmelCase = len(_snake_case ) lowerCAmelCase = os.path.join(_snake_case , '''vocab-tgt.json''' ) print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) ) # merges_file (bpecodes) lowerCAmelCase = os.path.join(_snake_case , VOCAB_FILES_NAMES['''merges_file'''] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" lowerCAmelCase = os.path.join(_snake_case , _snake_case ) if os.path.exists(_snake_case ): break with open(_snake_case , encoding='''utf-8''' ) as fin: lowerCAmelCase = fin.read() lowerCAmelCase = re.sub(R''' \d+$''' , '''''' , _snake_case , 0 , re.M ) # remove frequency number print(F"""Generating {merges_file}""" ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as fout: fout.write(_snake_case ) # model config lowerCAmelCase = os.path.join(_snake_case , '''config.json''' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}""" assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args["tokenizer"]}""" lowerCAmelCase = { '''architectures''': ['''FSMTForConditionalGeneration'''], '''model_type''': '''fsmt''', '''activation_dropout''': args['''activation_dropout'''], '''activation_function''': '''relu''', '''attention_dropout''': args['''attention_dropout'''], '''d_model''': args['''decoder_embed_dim'''], '''dropout''': args['''dropout'''], '''init_std''': 0.02, '''max_position_embeddings''': args['''max_source_positions'''], '''num_hidden_layers''': args['''encoder_layers'''], '''src_vocab_size''': src_vocab_size, '''tgt_vocab_size''': tgt_vocab_size, '''langs''': [src_lang, tgt_lang], '''encoder_attention_heads''': args['''encoder_attention_heads'''], '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''], '''encoder_layerdrop''': args['''encoder_layerdrop'''], '''encoder_layers''': args['''encoder_layers'''], '''decoder_attention_heads''': args['''decoder_attention_heads'''], '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''], '''decoder_layerdrop''': args['''decoder_layerdrop'''], '''decoder_layers''': args['''decoder_layers'''], '''bos_token_id''': 0, '''pad_token_id''': 1, '''eos_token_id''': 2, '''is_encoder_decoder''': True, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_all_embeddings'''], } # good hparam defaults to start with lowerCAmelCase = 5 lowerCAmelCase = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: lowerCAmelCase = best_score_hparams[model_dir]['''length_penalty'''] else: lowerCAmelCase = 1.0 print(F"""Generating {fsmt_model_config_file}""" ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) ) # tokenizer config lowerCAmelCase = os.path.join(_snake_case , _snake_case ) lowerCAmelCase = { '''langs''': [src_lang, tgt_lang], '''model_max_length''': 1024, '''do_lower_case''': do_lower_case, } print(F"""Generating {fsmt_tokenizer_config_file}""" ) with open(_snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(_snake_case , ensure_ascii=_snake_case , indent=_snake_case ) ) # model lowerCAmelCase = chkpt['''models'''][0] lowerCAmelCase = model.state_dict() # rename keys to start with 'model.' lowerCAmelCase = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys lowerCAmelCase = [ '''model.model''', '''model.encoder.version''', '''model.decoder.version''', '''model.encoder_embed_tokens.weight''', '''model.decoder_embed_tokens.weight''', '''model.encoder.embed_positions._float_tensor''', '''model.decoder.embed_positions._float_tensor''', ] for k in ignore_keys: model_state_dict.pop(_snake_case , _snake_case ) lowerCAmelCase = FSMTConfig.from_pretrained(_snake_case ) lowerCAmelCase = FSMTForConditionalGeneration(_snake_case ) # check that it loads ok model_new.load_state_dict(_snake_case , strict=_snake_case ) # save lowerCAmelCase = os.path.join(_snake_case , _snake_case ) print(F"""Generating {pytorch_weights_dump_path}""" ) torch.save(_snake_case , _snake_case ) print('''Conversion is done!''' ) print('''\nLast step is to upload the files to s3''' ) print(F"""cd {data_root}""" ) print(F"""transformers-cli upload {model_dir}""" ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() # Required parameters parser.add_argument( """--fsmt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ =parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
33
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
1
import os import pytest from transformers.dynamic_module_utils import get_imports UpperCAmelCase_ =""" import os """ UpperCAmelCase_ =""" def foo(): import os return False """ UpperCAmelCase_ =""" def foo(): def bar(): if True: import os return False return bar() """ UpperCAmelCase_ =""" import os try: import bar except ImportError: raise ValueError() """ UpperCAmelCase_ =""" import os def foo(): try: import bar except ImportError: raise ValueError() """ UpperCAmelCase_ =""" import os try: import bar except (ImportError, AttributeError): raise ValueError() """ UpperCAmelCase_ =""" import os try: import bar except ImportError as e: raise ValueError() """ UpperCAmelCase_ =""" import os try: import bar except: raise ValueError() """ UpperCAmelCase_ =""" import os try: import bar import baz except ImportError: raise ValueError() """ UpperCAmelCase_ =""" import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ UpperCAmelCase_ =[ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , _snake_case ) def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = os.path.join(_snake_case , '''test_file.py''' ) with open(_snake_case , '''w''' ) as _tmp_file: _tmp_file.write(_snake_case ) lowerCAmelCase = get_imports(_snake_case ) assert parsed_imports == ["os"]
33
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ="""switch_transformers""" __a : Union[str, Any] =["""past_key_values"""] __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = d_kv lowerCAmelCase = d_ff lowerCAmelCase = num_sparse_encoder_layers lowerCAmelCase = num_layers lowerCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase = num_heads lowerCAmelCase = num_experts lowerCAmelCase = expert_capacity lowerCAmelCase = router_bias lowerCAmelCase = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) lowerCAmelCase = router_dtype lowerCAmelCase = router_ignore_padding_tokens lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = relative_attention_max_distance lowerCAmelCase = dropout_rate lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_factor lowerCAmelCase = feed_forward_proj lowerCAmelCase = use_cache lowerCAmelCase = add_router_probs lowerCAmelCase = router_z_loss_coef lowerCAmelCase = router_aux_loss_coef lowerCAmelCase = self.feed_forward_proj.split('''-''' ) lowerCAmelCase = act_info[-1] lowerCAmelCase = act_info[0] == '''gated''' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
1
from bisect import bisect from itertools import accumulate def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case ): lowerCAmelCase = sorted(zip(_snake_case , _snake_case ) , key=lambda _snake_case : x[0] / x[1] , reverse=_snake_case ) lowerCAmelCase , lowerCAmelCase = [i[0] for i in r], [i[1] for i in r] lowerCAmelCase = list(accumulate(_snake_case ) ) lowerCAmelCase = bisect(_snake_case , _snake_case ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
33
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
1
from __future__ import annotations from typing import Any class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' pass class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = None def __iter__( self ): lowerCAmelCase = self lowerCAmelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(UpperCAmelCase_ ) yield node.data lowerCAmelCase = node.next_node @property def __snake_case ( self ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": UpperCAmelCase_ =Node(1) UpperCAmelCase_ =Node(2) UpperCAmelCase_ =Node(3) UpperCAmelCase_ =Node(4) print(root_node.has_loop) # False UpperCAmelCase_ =root_node.next_node print(root_node.has_loop) # True UpperCAmelCase_ =Node(5) UpperCAmelCase_ =Node(6) UpperCAmelCase_ =Node(5) UpperCAmelCase_ =Node(6) print(root_node.has_loop) # False UpperCAmelCase_ =Node(1) print(root_node.has_loop) # False
33
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Tuple =IFInpaintingSuperResolutionPipeline __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""} def __snake_case ( self ): return self._get_superresolution_dummy_components() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __snake_case ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __snake_case ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __snake_case ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __snake_case ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __snake_case ( self ): self._test_save_load_local() def __snake_case ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
33
1
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase_ ="""src/diffusers""" UpperCAmelCase_ =""".""" # This is to make sure the diffusers module imported is the one in the repo. UpperCAmelCase_ =importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCAmelCase_ =spec.loader.load_module() def UpperCAmelCase ( _snake_case , _snake_case ): return line.startswith(_snake_case ) or len(_snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , _snake_case ) is not None def UpperCAmelCase ( _snake_case ): lowerCAmelCase = object_name.split('''.''' ) lowerCAmelCase = 0 # First let's find the module where our object lives. lowerCAmelCase = parts[i] while i < len(_snake_case ) and not os.path.isfile(os.path.join(_snake_case , F"""{module}.py""" ) ): i += 1 if i < len(_snake_case ): lowerCAmelCase = os.path.join(_snake_case , parts[i] ) if i >= len(_snake_case ): raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(_snake_case , F"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase = f.readlines() # Now let's find the class / func in the code! lowerCAmelCase = '''''' lowerCAmelCase = 0 for name in parts[i + 1 :]: while ( line_index < len(_snake_case ) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_snake_case ): raise ValueError(F""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowerCAmelCase = line_index while line_index < len(_snake_case ) and _should_continue(lines[line_index] , _snake_case ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCAmelCase = lines[start_index:line_index] return "".join(_snake_case ) UpperCAmelCase_ =re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") UpperCAmelCase_ =re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") UpperCAmelCase_ =re.compile(R"""<FILL\s+[^>]*>""") def UpperCAmelCase ( _snake_case ): lowerCAmelCase = code.split('''\n''' ) lowerCAmelCase = 0 while idx < len(_snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def UpperCAmelCase ( _snake_case ): lowerCAmelCase = len(get_indent(_snake_case ) ) > 0 if has_indent: lowerCAmelCase = F"""class Bla:\n{code}""" lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_snake_case ) lowerCAmelCase = black.format_str(_snake_case , mode=_snake_case ) lowerCAmelCase , lowerCAmelCase = style_docstrings_in_code(_snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def UpperCAmelCase ( _snake_case , _snake_case=False ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase = f.readlines() lowerCAmelCase = [] lowerCAmelCase = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(_snake_case ): lowerCAmelCase = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = search.groups() lowerCAmelCase = find_code_in_diffusers(_snake_case ) lowerCAmelCase = get_indent(_snake_case ) lowerCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2 lowerCAmelCase = theoretical_indent lowerCAmelCase = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowerCAmelCase = True while line_index < len(_snake_case ) and should_continue: line_index += 1 if line_index >= len(_snake_case ): break lowerCAmelCase = lines[line_index] lowerCAmelCase = _should_continue(_snake_case , _snake_case ) and re.search(F"""^{indent}# End copy""" , _snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCAmelCase = lines[start_index:line_index] lowerCAmelCase = ''''''.join(_snake_case ) # Remove any nested `Copied from` comments to avoid circular copies lowerCAmelCase = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_snake_case ) is None] lowerCAmelCase = '''\n'''.join(_snake_case ) # Before comparing, use the `replace_pattern` on the original code. if len(_snake_case ) > 0: lowerCAmelCase = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) lowerCAmelCase = [_re_replace_pattern.search(_snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = pattern.groups() lowerCAmelCase = re.sub(_snake_case , _snake_case , _snake_case ) if option.strip() == "all-casing": lowerCAmelCase = re.sub(obja.lower() , obja.lower() , _snake_case ) lowerCAmelCase = re.sub(obja.upper() , obja.upper() , _snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowerCAmelCase = blackify(lines[start_index - 1] + theoretical_code ) lowerCAmelCase = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: lowerCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:] lowerCAmelCase = start_index + 1 if overwrite and len(_snake_case ) > 0: # Warn the user a file has been modified. print(F"""Detected changes, rewriting {filename}.""" ) with open(_snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_snake_case ) return diffs def UpperCAmelCase ( _snake_case = False ): lowerCAmelCase = glob.glob(os.path.join(_snake_case , '''**/*.py''' ) , recursive=_snake_case ) lowerCAmelCase = [] for filename in all_files: lowerCAmelCase = is_copy_consistent(_snake_case , _snake_case ) diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(_snake_case ) > 0: lowerCAmelCase = '''\n'''.join(_snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase_ =parser.parse_args() check_copies(args.fix_and_overwrite)
33
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ ={ """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ="""▁""" UpperCAmelCase_ ={"""vocab_file""": """sentencepiece.bpe.model"""} UpperCAmelCase_ ={ """vocab_file""": { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""", } } UpperCAmelCase_ ={ """facebook/xglm-564M""": 2048, } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : str =["""input_ids""", """attention_mask"""] def __init__( self , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowerCAmelCase = 7 lowerCAmelCase = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )] lowerCAmelCase = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase_ ) ) lowerCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} lowerCAmelCase = len(self.sp_model ) lowerCAmelCase = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCAmelCase_ ): lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowerCAmelCase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __snake_case ( self ): return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __snake_case ( self ): lowerCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __snake_case ( self , UpperCAmelCase_ ): return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase = self.sp_model.PieceToId(UpperCAmelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __snake_case ( self , UpperCAmelCase_ ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''''''.join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , ''' ''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , '''wb''' ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
33
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): '''simple docstring''' __a : Optional[datasets.Features] =None __a : str ="utf-8" __a : Optional[str] =None __a : Optional[str] =None __a : bool =True # deprecated __a : Optional[int] =None # deprecated __a : int =1_0 << 2_0 # 10MB __a : Optional[bool] =None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __a : str =JsonConfig def __snake_case ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) lowerCAmelCase = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self , UpperCAmelCase_ ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase_ , (str, list, tuple) ): lowerCAmelCase = data_files if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self , UpperCAmelCase_ ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema ) return pa_table def __snake_case ( self , UpperCAmelCase_ ): for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) # We keep only the field we are interested in lowerCAmelCase = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase_ , (list, tuple) ): lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} else: lowerCAmelCase = dataset lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) yield file_idx, self._cast_table(UpperCAmelCase_ ) # If the file has one json object per line else: with open(UpperCAmelCase_ , '''rb''' ) as f: lowerCAmelCase = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 ) lowerCAmelCase = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: lowerCAmelCase = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase_ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' ) try: while True: try: lowerCAmelCase = paj.read_json( io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase_ , pa.ArrowInvalid ) and "straddling" not in str(UpperCAmelCase_ ) or block_size > len(UpperCAmelCase_ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON try: lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(UpperCAmelCase_ ) break else: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ ) batch_idx += 1
33
1
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
33
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""maskformer-swin""" __a : Optional[int] ={ """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ ) lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = len(UpperCAmelCase_ ) lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) ) lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )] lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
1
UpperCAmelCase_ =[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)] def UpperCAmelCase ( _snake_case ): lowerCAmelCase = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100000] number //= 100000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution UpperCAmelCase_ =[None] * 1000_0000 UpperCAmelCase_ =True UpperCAmelCase_ =False def UpperCAmelCase ( _snake_case ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCAmelCase = chain(next_number(_snake_case ) ) lowerCAmelCase = number_chain while number < 10000000: lowerCAmelCase = number_chain number *= 10 return number_chain def UpperCAmelCase ( _snake_case = 10000000 ): for i in range(1 , _snake_case ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution() = }''')
33
from collections.abc import Sequence def UpperCAmelCase ( _snake_case , _snake_case = False ): if not arr: return 0 lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' ) lowerCAmelCase = 0.0 for num in arr: lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num ) lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
33
1
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case ): if height >= 1: move_tower(height - 1 , _snake_case , _snake_case , _snake_case ) move_disk(_snake_case , _snake_case ) move_tower(height - 1 , _snake_case , _snake_case , _snake_case ) def UpperCAmelCase ( _snake_case , _snake_case ): print('''moving disk from''' , _snake_case , '''to''' , _snake_case ) def UpperCAmelCase ( ): lowerCAmelCase = int(input('''Height of hanoi: ''' ).strip() ) move_tower(_snake_case , '''A''' , '''B''' , '''C''' ) if __name__ == "__main__": main()
33
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
1
import math from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : int ="""data2vec-audio""" def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_="gelu" , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCAmelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_=False , UpperCAmelCase_=16 , UpperCAmelCase_=19 , UpperCAmelCase_=5 , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="sum" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=2_56 , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCAmelCase_=(5, 3, 3, 1, 1) , UpperCAmelCase_=(1, 2, 3, 1, 1) , UpperCAmelCase_=5_12 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ ) lowerCAmelCase = hidden_size lowerCAmelCase = feat_extract_activation lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = conv_bias lowerCAmelCase = num_conv_pos_embeddings lowerCAmelCase = num_conv_pos_embedding_groups lowerCAmelCase = conv_pos_kernel_size lowerCAmelCase = len(self.conv_dim ) lowerCAmelCase = num_hidden_layers lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = feat_proj_dropout lowerCAmelCase = final_dropout lowerCAmelCase = layerdrop lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range lowerCAmelCase = vocab_size lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase = mask_time_prob lowerCAmelCase = mask_time_length lowerCAmelCase = mask_time_min_masks lowerCAmelCase = mask_feature_prob lowerCAmelCase = mask_feature_length lowerCAmelCase = mask_feature_min_masks # ctc loss lowerCAmelCase = ctc_loss_reduction lowerCAmelCase = ctc_zero_infinity # adapter lowerCAmelCase = add_adapter lowerCAmelCase = adapter_kernel_size lowerCAmelCase = adapter_stride lowerCAmelCase = num_adapter_layers lowerCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = list(UpperCAmelCase_ ) lowerCAmelCase = xvector_output_dim @property def __snake_case ( self ): return math.prod(self.conv_stride )
33
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=18 , UpperCAmelCase_=30 , UpperCAmelCase_=4_00 , UpperCAmelCase_=True , UpperCAmelCase_=32 , UpperCAmelCase_=True , ): lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size_divisor lowerCAmelCase = do_rescale def __snake_case ( self ): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Dict =GLPNImageProcessor if is_vision_available() else None def __snake_case ( self ): lowerCAmelCase = GLPNImageProcessingTester(self ) @property def __snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self ): lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''size_divisor''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''resample''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_rescale''' ) ) def __snake_case ( self ): pass def __snake_case ( self ): # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __snake_case ( self ): # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def __snake_case ( self ): # Initialize image_processing lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
33
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
1
import numpy as np def UpperCAmelCase ( _snake_case ): return 1 / (1 + np.exp(-vector )) def UpperCAmelCase ( _snake_case ): return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
33
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( _snake_case = 3 ): if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' ) lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' ) lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case ) lowerCAmelCase = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots lowerCAmelCase = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
33
1
import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=18 , UpperCAmelCase_=30 , UpperCAmelCase_=4_00 , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=True , ): lowerCAmelCase = size if size is not None else {'''height''': 18, '''width''': 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize def __snake_case ( self ): return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866443634033203, 0.6618829369544983, 0.3891746401786804], [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Optional[Any] =ImageGPTImageProcessor if is_vision_available() else None def __snake_case ( self ): lowerCAmelCase = ImageGPTImageProcessingTester(self ) @property def __snake_case ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __snake_case ( self ): lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase_ , '''clusters''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCAmelCase_ , '''do_normalize''' ) ) def __snake_case ( self ): lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def __snake_case ( self ): lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) lowerCAmelCase = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase = os.path.join(UpperCAmelCase_ , '''image_processor.json''' ) image_processor_first.to_json_file(UpperCAmelCase_ ) lowerCAmelCase = self.image_processing_class.from_json_file(UpperCAmelCase_ ).to_dict() lowerCAmelCase = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = self.image_processing_class.from_pretrained(UpperCAmelCase_ ).to_dict() lowerCAmelCase = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(UpperCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , UpperCAmelCase_ ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def __snake_case ( self ): pass def UpperCAmelCase ( ): lowerCAmelCase = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) lowerCAmelCase = Image.open(dataset[4]['''file'''] ) lowerCAmelCase = Image.open(dataset[5]['''file'''] ) lowerCAmelCase = [imagea, imagea] return images @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def __snake_case ( self ): lowerCAmelCase = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) lowerCAmelCase = prepare_images() # test non-batched lowerCAmelCase = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) lowerCAmelCase = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase_ ) # test batched lowerCAmelCase = image_processing(UpperCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) lowerCAmelCase = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase_ )
33
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Any =1 @register_to_config def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ): lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowerCAmelCase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) lowerCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): lowerCAmelCase = std.unsqueeze(-1 ) lowerCAmelCase = -score / std # compute lowerCAmelCase = -1.0 / len(self.timesteps ) lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowerCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): lowerCAmelCase = beta_t.unsqueeze(-1 ) lowerCAmelCase = -0.5 * beta_t * x lowerCAmelCase = torch.sqrt(UpperCAmelCase_ ) lowerCAmelCase = drift - diffusion**2 * score lowerCAmelCase = x + drift * dt # add noise lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype ) lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ): return self.config.num_train_timesteps
33
1
def UpperCAmelCase ( _snake_case ): lowerCAmelCase = int(_snake_case ) if n_element < 1: lowerCAmelCase = ValueError('''a should be a positive number''' ) raise my_error lowerCAmelCase = [1] lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (0, 0, 0) lowerCAmelCase = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": UpperCAmelCase_ =input("""Enter the last number (nth term) of the Hamming Number Series: """) print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""") UpperCAmelCase_ =hamming(int(n)) print("""-----------------------------------------------------""") print(F'''The list with nth numbers is: {hamming_numbers}''') print("""-----------------------------------------------------""")
33
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __UpperCamelCase ( yaml.SafeLoader ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys] lowerCAmelCase = Counter(UpperCAmelCase_ ) lowerCAmelCase = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ): lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ ) self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ ) return mapping def UpperCAmelCase ( _snake_case ): lowerCAmelCase = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase = full_content[1:].index('''---''' ) + 1 lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_snake_case ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __snake_case ( cls , UpperCAmelCase_ ): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase_ ) else: return cls() def __snake_case ( self , UpperCAmelCase_ ): if path.exists(): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase = readme_file.read() else: lowerCAmelCase = None lowerCAmelCase = self._to_readme(UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ = None ): if readme_content is not None: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ ) lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def __snake_case ( cls , UpperCAmelCase_ ): lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase_ ) def __snake_case ( self ): return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' ) UpperCAmelCase_ ={ """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") UpperCAmelCase_ =ap.parse_args() UpperCAmelCase_ =Path(args.readme_filepath) UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
33
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ ={ """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 ) lowerCAmelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): for example in examples: lowerCAmelCase = video_classifier(UpperCAmelCase_ ) self.assertEqual( UpperCAmelCase_ , [ {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, ] , ) @require_torch def __snake_case ( self ): lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowerCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowerCAmelCase = pipeline( '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 ) lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) lowerCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __snake_case ( self ): pass
33
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ ={ """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def __snake_case ( self , UpperCAmelCase_=0 ): lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) ) lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) # warmup pass to apply optimizations lowerCAmelCase = pipe(**self.get_dummy_inputs() ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def __snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __snake_case ( self ): lowerCAmelCase = ort.SessionOptions() lowerCAmelCase = False return options def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
1
import sys UpperCAmelCase_ =( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def UpperCAmelCase ( _snake_case = N ): lowerCAmelCase = -sys.maxsize - 1 for i in range(len(SCREAMING_SNAKE_CASE_ ) - 12 ): lowerCAmelCase = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: lowerCAmelCase = product return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
700
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( _snake_case ): lowerCAmelCase = args.pruning_method lowerCAmelCase = args.threshold lowerCAmelCase = args.model_name_or_path.rstrip('''/''' ) lowerCAmelCase = args.target_model_path print(F"""Load fine-pruned model from {model_name_or_path}""" ) lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) ) lowerCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "bias" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase , lowerCAmelCase = -0.1, 1.1 lowerCAmelCase = torch.sigmoid(_snake_case ) lowerCAmelCase = s * (r - l) + l lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: lowerCAmelCase = os.path.join( os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" ) if not os.path.isdir(_snake_case ): shutil.copytree(_snake_case , _snake_case ) print(F"""\nCreated folder {target_model_path}""" ) torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) UpperCAmelCase_ =parser.parse_args() main(args)
33
0
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( _snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' __a : int =IFImgaImgSuperResolutionPipeline __a : int =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""} __a : int =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} ) __a : Optional[int] =PipelineTesterMixin.required_optional_params - {"""latents"""} def __snake_case ( self ): return self._get_superresolution_dummy_components() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(lowerCAmelCase__ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(lowerCAmelCase__ ) else: lowerCAmelCase = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __snake_case ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __snake_case ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __snake_case ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __snake_case ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __snake_case ( self ): self._test_save_load_local() def __snake_case ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
701
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } UpperCAmelCase_ ={ """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } UpperCAmelCase_ ={ """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def UpperCAmelCase ( _snake_case ): lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char lowerCAmelCase = set(_snake_case ) return pairs class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ): super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = merges_file lowerCAmelCase = {} lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 self.add_from_file(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self ): return len(self.encoder ) def __snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.cache: return self.cache[token] lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 while i < len(UpperCAmelCase_ ): try: lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = new_word if len(UpperCAmelCase_ ) == 1: break else: lowerCAmelCase = get_pairs(UpperCAmelCase_ ) lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ ) lowerCAmelCase = word[:-4] lowerCAmelCase = word return word def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [] lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) ) return split_tokens def __snake_case ( self , UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.merges_file , UpperCAmelCase_ ) return out_vocab_file, out_merge_file def __snake_case ( self , UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): try: with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" ) return lowerCAmelCase = f.readlines() for lineTmp in lines: lowerCAmelCase = lineTmp.strip() lowerCAmelCase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase = line[:idx] lowerCAmelCase = len(self.encoder )
33
0
def UpperCAmelCase ( _snake_case ): return "".join([hex(_SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(_SCREAMING_SNAKE_CASE )] ) def UpperCAmelCase ( _snake_case ): # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(_SCREAMING_SNAKE_CASE ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(_SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
702
from __future__ import annotations from typing import Generic, TypeVar UpperCAmelCase_ =TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = self lowerCAmelCase = 0 class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # map from node name to the node object lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # create a new set with x as its member lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): # find the set x belongs to (with path-compression) lowerCAmelCase = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase = nodea else: lowerCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # merge 2 disjoint sets self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) ) class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): # add an edge with the given weight self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase = weight lowerCAmelCase = weight def __snake_case ( self ): lowerCAmelCase = [] lowerCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda UpperCAmelCase_ : x[2] ) # creating the disjoint set lowerCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(UpperCAmelCase_ ) # MST generation lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index] index += 1 lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ ) return graph
33
0
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class __UpperCamelCase ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , UpperCAmelCase_ = "▁" , UpperCAmelCase_ = True , UpperCAmelCase_ = "<unk>" , UpperCAmelCase_ = "</s>" , UpperCAmelCase_ = "<pad>" , ): lowerCAmelCase = { '''pad''': {'''id''': 0, '''token''': pad_token}, '''eos''': {'''id''': 1, '''token''': eos_token}, '''unk''': {'''id''': 2, '''token''': unk_token}, } lowerCAmelCase = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCAmelCase = token_dict['''token'''] lowerCAmelCase = Tokenizer(Unigram() ) lowerCAmelCase = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ), normalizers.Lowercase(), ] ) lowerCAmelCase = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=A_ , add_prefix_space=A_ ), pre_tokenizers.Digits(individual_digits=A_ ), pre_tokenizers.Punctuation(), ] ) lowerCAmelCase = decoders.Metaspace(replacement=A_ , add_prefix_space=A_ ) lowerCAmelCase = TemplateProcessing( single=F"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , ) lowerCAmelCase = { '''model''': '''SentencePieceUnigram''', '''replacement''': replacement, '''add_prefix_space''': add_prefix_space, } super().__init__(A_ , A_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = 80_00 , UpperCAmelCase_ = True , ): lowerCAmelCase = trainers.UnigramTrainer( vocab_size=A_ , special_tokens=self.special_tokens_list , show_progress=A_ , ) if isinstance(A_ , A_ ): lowerCAmelCase = [files] self._tokenizer.train(A_ , trainer=A_ ) self.add_unk_id() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = 80_00 , UpperCAmelCase_ = True , ): lowerCAmelCase = trainers.UnigramTrainer( vocab_size=A_ , special_tokens=self.special_tokens_list , show_progress=A_ , ) self._tokenizer.train_from_iterator(A_ , trainer=A_ ) self.add_unk_id() def __snake_case ( self ): lowerCAmelCase = json.loads(self._tokenizer.to_str() ) lowerCAmelCase = self.special_tokens['''unk''']['''id'''] lowerCAmelCase = Tokenizer.from_str(json.dumps(A_ ) )
703
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations(_snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(_snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations_with_dp_array( _snake_case , _snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , _snake_case ) for item in array ) lowerCAmelCase = answer return answer lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(_snake_case , _snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * (target + 1) lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(_snake_case ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ =3 UpperCAmelCase_ =5 UpperCAmelCase_ =[1, 2, 5] print(combination_sum_iv(n, array, target))
33
0
from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : List[Any] =["""pixel_values"""] def __init__( self , UpperCAmelCase_ = True , UpperCAmelCase_ = 32 , UpperCAmelCase_=PILImageResampling.BILINEAR , UpperCAmelCase_ = True , **UpperCAmelCase_ , ): lowerCAmelCase = do_resize lowerCAmelCase = do_rescale lowerCAmelCase = size_divisor lowerCAmelCase = resample super().__init__(**UpperCamelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ ): lowerCAmelCase = get_image_size(UpperCamelCase_ ) # Rounds the height and width down to the closest multiple of size_divisor lowerCAmelCase = height // size_divisor * size_divisor lowerCAmelCase = width // size_divisor * size_divisor lowerCAmelCase = resize(UpperCamelCase_ , (new_h, new_w) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) return image def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ ): return rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_=None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = ChannelDimension.FIRST , **UpperCAmelCase_ , ): lowerCAmelCase = do_resize if do_resize is not None else self.do_resize lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase = size_divisor if size_divisor is not None else self.size_divisor lowerCAmelCase = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) lowerCAmelCase = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. lowerCAmelCase = [to_numpy_array(UpperCamelCase_ ) for img in images] if do_resize: lowerCAmelCase = [self.resize(UpperCamelCase_ , size_divisor=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: lowerCAmelCase = [self.rescale(UpperCamelCase_ , scale=1 / 2_55 ) for image in images] lowerCAmelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] lowerCAmelCase = {'pixel_values': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
704
import torch from diffusers import StableDiffusionPipeline UpperCAmelCase_ ="""path-to-your-trained-model""" UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") UpperCAmelCase_ ="""A photo of sks dog in a bucket""" UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
33
0
'''simple docstring''' import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCAmelCase_ ="pt" elif is_tf_available(): UpperCAmelCase_ ="tf" else: UpperCAmelCase_ ="jax" class __UpperCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' __a : Optional[Any] =PerceiverTokenizer __a : Union[str, Any] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __snake_case ( self ): return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def __snake_case ( self , **UpperCAmelCase_ ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=20 , UpperCAmelCase_=5 ): lowerCAmelCase = [] for i in range(len(__lowerCamelCase ) ): try: lowerCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) lowerCAmelCase = list(filter(lambda UpperCAmelCase_ : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , __lowerCamelCase ) ) lowerCAmelCase = list(filter(lambda UpperCAmelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCamelCase ) , __lowerCamelCase ) ) if max_length is not None and len(__lowerCamelCase ) > max_length: lowerCAmelCase = toks[:max_length] if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0: while len(__lowerCamelCase ) < min_length: lowerCAmelCase = toks + toks # toks_str = [t[1] for t in toks] lowerCAmelCase = [t[0] for t in toks] # Ensure consistency lowerCAmelCase = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) if " " not in output_txt and len(__lowerCamelCase ) > 1: lowerCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase ) ) if with_prefix_space: lowerCAmelCase = " " + output_txt lowerCAmelCase = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) return output_txt, output_ids def __snake_case ( self ): lowerCAmelCase = self.perceiver_tokenizer lowerCAmelCase = "Unicode €." lowerCAmelCase = tokenizer(__lowerCamelCase ) lowerCAmelCase = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5] self.assertEqual(encoded['''input_ids'''] , __lowerCamelCase ) # decoding lowerCAmelCase = tokenizer.decode(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , '''[CLS]Unicode €.[SEP]''' ) lowerCAmelCase = tokenizer('''e è é ê ë''' ) lowerCAmelCase = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5] self.assertEqual(encoded['''input_ids'''] , __lowerCamelCase ) # decoding lowerCAmelCase = tokenizer.decode(__lowerCamelCase ) self.assertEqual(__lowerCamelCase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def __snake_case ( self ): lowerCAmelCase = self.perceiver_tokenizer lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off lowerCAmelCase = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0] # fmt: on lowerCAmelCase = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) if FRAMEWORK != "jax": lowerCAmelCase = list(batch.input_ids.numpy()[0] ) else: lowerCAmelCase = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def __snake_case ( self ): lowerCAmelCase = self.perceiver_tokenizer lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowerCAmelCase = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , __lowerCamelCase ) self.assertIn('''attention_mask''' , __lowerCamelCase ) self.assertNotIn('''decoder_input_ids''' , __lowerCamelCase ) self.assertNotIn('''decoder_attention_mask''' , __lowerCamelCase ) def __snake_case ( self ): lowerCAmelCase = self.perceiver_tokenizer lowerCAmelCase = [ "Summary of the text.", "Another summary.", ] lowerCAmelCase = tokenizer( text_target=__lowerCamelCase , max_length=32 , padding='''max_length''' , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def __snake_case ( self ): lowerCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowerCAmelCase = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = " He is very happy, UNwant\u00E9d,running" lowerCAmelCase = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) lowerCAmelCase = tokenizer.__class__.from_pretrained(__lowerCamelCase ) lowerCAmelCase = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) shutil.rmtree(__lowerCamelCase ) lowerCAmelCase = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(['''bim''', '''bambam'''] ) lowerCAmelCase = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) lowerCAmelCase = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) tokenizer.save_pretrained(__lowerCamelCase ) lowerCAmelCase = tokenizer.__class__.from_pretrained(__lowerCamelCase ) lowerCAmelCase = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowerCAmelCase = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__lowerCamelCase ) def __snake_case ( self ): lowerCAmelCase = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: lowerCAmelCase = json.load(__lowerCamelCase ) with open(os.path.join(__lowerCamelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: lowerCAmelCase = json.load(__lowerCamelCase ) lowerCAmelCase = [F"""<extra_id_{i}>""" for i in range(1_25 )] lowerCAmelCase = added_tokens_extra_ids + [ "an_additional_special_token" ] lowerCAmelCase = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(__lowerCamelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) with open(os.path.join(__lowerCamelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__lowerCamelCase , __lowerCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files lowerCAmelCase = tokenizer_class.from_pretrained( __lowerCamelCase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained lowerCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowerCamelCase )] lowerCAmelCase = tokenizer_class.from_pretrained( __lowerCamelCase , additional_special_tokens=__lowerCamelCase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def __snake_case ( self ): lowerCAmelCase = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_78] ) , '''�''' ) def __snake_case ( self ): pass def __snake_case ( self ): pass def __snake_case ( self ): pass def __snake_case ( self ): pass def __snake_case ( self ): lowerCAmelCase = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): lowerCAmelCase = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] lowerCAmelCase = tokenizer.convert_tokens_to_string(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
705
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
0
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore UpperCAmelCase_ =list(good_file_paths()) assert filepaths, "good_file_paths() failed!" UpperCAmelCase_ =[file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("""\n""".join(upper_files) + """\n""") UpperCAmelCase_ =[file for file in filepaths if """ """ in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("""\n""".join(space_files) + """\n""") UpperCAmelCase_ =[file for file in filepaths if """-""" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("""\n""".join(hyphen_files) + """\n""") UpperCAmelCase_ =[file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("""\n""".join(nodir_files) + """\n""") UpperCAmelCase_ =len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
706
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
0
import itertools import os import re UpperCAmelCase_ =re.compile(R"""([A-Z]+)([A-Z][a-z])""") UpperCAmelCase_ =re.compile(R"""([a-z\d])([A-Z])""") UpperCAmelCase_ =re.compile(R"""(?<!_)_(?!_)""") UpperCAmelCase_ =re.compile(R"""(_{2,})""") UpperCAmelCase_ =r"^\w+(\.\w+)*$" UpperCAmelCase_ =r"<>:/\|?*" def UpperCAmelCase ( _snake_case ): lowerCAmelCase = _uppercase_uppercase_re.sub(R'''\1_\2''' , _snake_case ) lowerCAmelCase = _lowercase_uppercase_re.sub(R'''\1_\2''' , _snake_case ) return name.lower() def UpperCAmelCase ( _snake_case ): lowerCAmelCase = _single_underscore_re.split(_snake_case ) lowerCAmelCase = [_multiple_underscores_re.split(_snake_case ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(_snake_case ) if n != '''''' ) def UpperCAmelCase ( _snake_case ): if os.path.basename(_snake_case ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(_snake_case ) def UpperCAmelCase ( _snake_case , _snake_case ): if os.path.basename(_snake_case ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , _snake_case ): raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" ) return F"""{filename_prefix_for_name(_snake_case )}-{split}""" def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case=None ): lowerCAmelCase = filename_prefix_for_split(_snake_case , _snake_case ) if filetype_suffix: prefix += F""".{filetype_suffix}""" lowerCAmelCase = os.path.join(_snake_case , _snake_case ) return F"""{filepath}*""" def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None ): lowerCAmelCase = filename_prefix_for_split(_snake_case , _snake_case ) lowerCAmelCase = os.path.join(_snake_case , _snake_case ) if shard_lengths: lowerCAmelCase = len(_snake_case ) lowerCAmelCase = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(_snake_case )] if filetype_suffix: lowerCAmelCase = [filename + F""".{filetype_suffix}""" for filename in filenames] return filenames else: lowerCAmelCase = prefix if filetype_suffix: filename += F""".{filetype_suffix}""" return [filename]
707
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ="""switch_transformers""" __a : Union[str, Any] =["""past_key_values"""] __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = d_kv lowerCAmelCase = d_ff lowerCAmelCase = num_sparse_encoder_layers lowerCAmelCase = num_layers lowerCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase = num_heads lowerCAmelCase = num_experts lowerCAmelCase = expert_capacity lowerCAmelCase = router_bias lowerCAmelCase = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) lowerCAmelCase = router_dtype lowerCAmelCase = router_ignore_padding_tokens lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = relative_attention_max_distance lowerCAmelCase = dropout_rate lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_factor lowerCAmelCase = feed_forward_proj lowerCAmelCase = use_cache lowerCAmelCase = add_router_probs lowerCAmelCase = router_z_loss_coef lowerCAmelCase = router_aux_loss_coef lowerCAmelCase = self.feed_forward_proj.split('''-''' ) lowerCAmelCase = act_info[-1] lowerCAmelCase = act_info[0] == '''gated''' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
0
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType UpperCAmelCase_,UpperCAmelCase_,UpperCAmelCase_ =False, False, False @dataclass class __UpperCamelCase : '''simple docstring''' __a : Dict =None __a : str =True __a : List[Any] =True __a : Optional[int] =None # Automatically constructed __a : Optional[Any] ="""dict""" __a : Union[str, Any] =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) __a : int =field(default="""Audio""" , init=__A , repr=__A ) def __call__( self ): return self.pa_type def __snake_case ( self , UpperCAmelCase_ ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return {"bytes": None, "path": value} elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes lowerCAmelCase = BytesIO() sf.write(UpperCAmelCase_ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) lowerCAmelCase = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67 else: lowerCAmelCase = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67 lowerCAmelCase = BytesIO(bytes() ) sf.write(UpperCAmelCase_ , UpperCAmelCase_ , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"""An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.""" ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' ) lowerCAmelCase = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(F"""An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err lowerCAmelCase = xsplitext(UpperCAmelCase_ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: lowerCAmelCase = token_per_repo_id or {} lowerCAmelCase = path.split('''::''' )[-1] try: lowerCAmelCase = string_to_dict(UpperCAmelCase_ , config.HUB_DATASETS_URL )['''repo_id'''] lowerCAmelCase = token_per_repo_id[repo_id] except (ValueError, KeyError): lowerCAmelCase = None with xopen(UpperCAmelCase_ , '''rb''' , use_auth_token=UpperCAmelCase_ ) as f: lowerCAmelCase = sf.read(UpperCAmelCase_ ) else: lowerCAmelCase = sf.read(UpperCAmelCase_ ) lowerCAmelCase = array.T if self.mono: lowerCAmelCase = librosa.to_mono(UpperCAmelCase_ ) if self.sampling_rate and self.sampling_rate != sampling_rate: lowerCAmelCase = librosa.resample(UpperCAmelCase_ , orig_sr=UpperCAmelCase_ , target_sr=self.sampling_rate ) lowerCAmelCase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __snake_case ( self ): from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def __snake_case ( self , UpperCAmelCase_ ): if pa.types.is_string(storage.type ): lowerCAmelCase = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.binary() ) lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowerCAmelCase = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.string() ) lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): lowerCAmelCase = pa.array([Audio().encode_example(UpperCAmelCase_ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: lowerCAmelCase = storage.field('''bytes''' ) else: lowerCAmelCase = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: lowerCAmelCase = storage.field('''path''' ) else: lowerCAmelCase = pa.array([None] * len(UpperCAmelCase_ ) , type=pa.string() ) lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(UpperCAmelCase_ , self.pa_type ) def __snake_case ( self , UpperCAmelCase_ ): @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase_ ): with xopen(UpperCAmelCase_ , '''rb''' ) as f: lowerCAmelCase = f.read() return bytes_ lowerCAmelCase = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowerCAmelCase = pa.array( [os.path.basename(UpperCAmelCase_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(UpperCAmelCase_ , self.pa_type )
708
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
0
UpperCAmelCase_ =[ 'DownloadConfig', 'DownloadManager', 'DownloadMode', 'StreamingDownloadManager', ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
709
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Tuple =IFInpaintingSuperResolutionPipeline __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""} def __snake_case ( self ): return self._get_superresolution_dummy_components() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __snake_case ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __snake_case ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __snake_case ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __snake_case ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __snake_case ( self ): self._test_save_load_local() def __snake_case ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
33
0
def UpperCAmelCase ( ): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(lowerCamelCase__ , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(F'''{solution() = }''')
710
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ ={ """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
0
'''simple docstring''' from typing import Any class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = None def __repr__( self ): return F"""Node({self.data})""" class __UpperCamelCase : '''simple docstring''' def __init__( self ): lowerCAmelCase = None def __iter__( self ): lowerCAmelCase = self.head while node: yield node.data lowerCAmelCase = node.next def __len__( self ): return sum(1 for _ in self ) def __repr__( self ): return "->".join([str(__A ) for item in self] ) def __getitem__( self , UpperCAmelCase_ ): if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , UpperCAmelCase_ , UpperCAmelCase_ ): if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) lowerCAmelCase = self.head for _ in range(__A ): lowerCAmelCase = current.next lowerCAmelCase = data def __snake_case ( self , UpperCAmelCase_ ): self.insert_nth(len(self ) , __A ) def __snake_case ( self , UpperCAmelCase_ ): self.insert_nth(0 , __A ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) lowerCAmelCase = Node(__A ) if self.head is None: lowerCAmelCase = new_node elif index == 0: lowerCAmelCase = self.head # link new_node to head lowerCAmelCase = new_node else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = new_node def __snake_case ( self ): # print every node data print(self ) def __snake_case ( self ): return self.delete_nth(0 ) def __snake_case ( self ): # delete from tail return self.delete_nth(len(self ) - 1 ) def __snake_case ( self , UpperCAmelCase_ = 0 ): if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) lowerCAmelCase = self.head # default first node if index == 0: lowerCAmelCase = self.head.next else: lowerCAmelCase = self.head for _ in range(index - 1 ): lowerCAmelCase = temp.next lowerCAmelCase = temp.next lowerCAmelCase = temp.next.next return delete_node.data def __snake_case ( self ): return self.head is None def __snake_case ( self ): lowerCAmelCase = None lowerCAmelCase = self.head while current: # Store the current node's next node. lowerCAmelCase = current.next # Make the current node's next point backwards lowerCAmelCase = prev # Make the previous node be the current node lowerCAmelCase = current # Make the current node the next node (to progress iteration) lowerCAmelCase = next_node # Return prev in order to put the head at the end lowerCAmelCase = prev def UpperCAmelCase ( ): lowerCAmelCase = LinkedList() assert linked_list.is_empty() is True assert str(_lowerCAmelCase ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(_lowerCAmelCase ) == i linked_list.insert_nth(_lowerCAmelCase , i + 1 ) assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_lowerCAmelCase ) == 9 assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCAmelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(-8 , 1 ) ) def UpperCAmelCase ( ): lowerCAmelCase = [ -9, 100, Node(77345112 ), "dlrow olleH", 7, 5555, 0, -192.55555, "Hello, world!", 77.9, Node(10 ), None, None, 12.20, ] lowerCAmelCase = LinkedList() for i in test_input: linked_list.insert_tail(_lowerCAmelCase ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_lowerCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCAmelCase = linked_list.delete_head() assert result == -9 assert ( str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCAmelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCAmelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(_lowerCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_lowerCAmelCase ) assert ( str(_lowerCAmelCase ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_lowerCAmelCase ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def UpperCAmelCase ( ): from doctest import testmod testmod() lowerCAmelCase = LinkedList() linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(_lowerCAmelCase ) print('''\nReading/changing Node data using indexing:''' ) print(F"""Element at Position 1: {linked_list[1]}""" ) lowerCAmelCase = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(_lowerCAmelCase ) print(F"""length of linked_list is : {len(_lowerCAmelCase )}""" ) if __name__ == "__main__": main()
711
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): '''simple docstring''' __a : Optional[datasets.Features] =None __a : str ="utf-8" __a : Optional[str] =None __a : Optional[str] =None __a : bool =True # deprecated __a : Optional[int] =None # deprecated __a : int =1_0 << 2_0 # 10MB __a : Optional[bool] =None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __a : str =JsonConfig def __snake_case ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) lowerCAmelCase = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self , UpperCAmelCase_ ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase_ , (str, list, tuple) ): lowerCAmelCase = data_files if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self , UpperCAmelCase_ ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema ) return pa_table def __snake_case ( self , UpperCAmelCase_ ): for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) # We keep only the field we are interested in lowerCAmelCase = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase_ , (list, tuple) ): lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} else: lowerCAmelCase = dataset lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) yield file_idx, self._cast_table(UpperCAmelCase_ ) # If the file has one json object per line else: with open(UpperCAmelCase_ , '''rb''' ) as f: lowerCAmelCase = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 ) lowerCAmelCase = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: lowerCAmelCase = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase_ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' ) try: while True: try: lowerCAmelCase = paj.read_json( io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase_ , pa.ArrowInvalid ) and "straddling" not in str(UpperCAmelCase_ ) or block_size > len(UpperCAmelCase_ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON try: lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(UpperCAmelCase_ ) break else: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ ) batch_idx += 1
33
0
from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCAmelCase_ ="Create a default config file for Accelerate with only a few flags set." def UpperCAmelCase ( _snake_case="no" , _snake_case = default_json_config_file , _snake_case = False ): lowerCAmelCase = Path(__lowerCAmelCase ) path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase ) if path.exists(): print( F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False lowerCAmelCase = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F"""`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}""" ) lowerCAmelCase = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): lowerCAmelCase = torch.cuda.device_count() lowerCAmelCase = num_gpus lowerCAmelCase = False if num_gpus > 1: lowerCAmelCase = """MULTI_GPU""" else: lowerCAmelCase = """NO""" elif is_xpu_available() and use_xpu: lowerCAmelCase = torch.xpu.device_count() lowerCAmelCase = num_xpus lowerCAmelCase = False if num_xpus > 1: lowerCAmelCase = """MULTI_XPU""" else: lowerCAmelCase = """NO""" elif is_npu_available(): lowerCAmelCase = torch.npu.device_count() lowerCAmelCase = num_npus lowerCAmelCase = False if num_npus > 1: lowerCAmelCase = """MULTI_NPU""" else: lowerCAmelCase = """NO""" else: lowerCAmelCase = 0 lowerCAmelCase = True lowerCAmelCase = 1 lowerCAmelCase = """NO""" lowerCAmelCase = ClusterConfig(**__lowerCAmelCase ) config.to_json_file(__lowerCAmelCase ) return path def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = parser.add_parser('''default''' , parents=__lowerCAmelCase , help=__lowerCAmelCase , formatter_class=__lowerCAmelCase ) parser.add_argument( '''--config_file''' , default=__lowerCAmelCase , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=__lowerCAmelCase , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. ''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=__lowerCAmelCase ) return parser def UpperCAmelCase ( _snake_case ): lowerCAmelCase = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F"""accelerate configuration saved at {config_file}""" )
712
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""maskformer-swin""" __a : Optional[int] ={ """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ ) lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = len(UpperCAmelCase_ ) lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) ) lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )] lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
0
import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def UpperCAmelCase ( _snake_case ): lowerCAmelCase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: lowerCAmelCase = s_dict.pop(__UpperCAmelCase ) elif "subsample" in key: lowerCAmelCase = s_dict.pop(__UpperCAmelCase ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = emb.weight.shape lowerCAmelCase = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) lowerCAmelCase = emb.weight.data return lin_layer def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = torch.load(__UpperCAmelCase , map_location='''cpu''' ) lowerCAmelCase = mam_aaa['''args'''] lowerCAmelCase = mam_aaa['''model'''] lowerCAmelCase = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(__UpperCAmelCase ) rename_keys(__UpperCAmelCase ) lowerCAmelCase = state_dict['''decoder.embed_tokens.weight'''].shape[0] lowerCAmelCase = args.share_decoder_input_output_embed lowerCAmelCase = [int(__UpperCAmelCase ) for i in args.conv_kernel_sizes.split(''',''' )] lowerCAmelCase = SpeechaTextConfig( vocab_size=__UpperCAmelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(__UpperCAmelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__UpperCAmelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__UpperCAmelCase , num_beams=5 , max_length=200 , use_cache=__UpperCAmelCase , decoder_start_token_id=2 , early_stopping=__UpperCAmelCase , ) lowerCAmelCase = SpeechaTextForConditionalGeneration(__UpperCAmelCase ) lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0 and not set(__UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' F""" but all the following weights are missing {missing}""" ) if tie_embeds: lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCAmelCase = lm_head_weights model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() # Required parameters parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCAmelCase_ =parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
713
from collections.abc import Sequence def UpperCAmelCase ( _snake_case , _snake_case = False ): if not arr: return 0 lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' ) lowerCAmelCase = 0.0 for num in arr: lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num ) lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
33
0
from __future__ import annotations def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = 0 lowerCAmelCase = sum(lowerCAmelCase__ ) create_state_space_tree(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return result def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): if sum(lowerCAmelCase__ ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase__ )) < max_sum: return if sum(lowerCAmelCase__ ) == max_sum: result.append(lowerCAmelCase__ ) return for index in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ): create_state_space_tree( lowerCAmelCase__ , lowerCAmelCase__ , index + 1 , [*path, nums[index]] , lowerCAmelCase__ , remaining_nums_sum - nums[index] , ) UpperCAmelCase_ =[3, 34, 4, 12, 5, 2] UpperCAmelCase_ =9 UpperCAmelCase_ =generate_sum_of_subsets_soln(nums, max_sum) print(*result)
714
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
0
def UpperCAmelCase ( _snake_case ): '''simple docstring''' lowerCAmelCase = 1 for i in range(1 , num + 1 ): fact *= i return fact def UpperCAmelCase ( _snake_case ): '''simple docstring''' lowerCAmelCase = 0 while number > 0: lowerCAmelCase = number % 10 sum_of_digits += last_digit lowerCAmelCase = number // 10 # Removing the last_digit from the given number return sum_of_digits def UpperCAmelCase ( _snake_case = 100 ): '''simple docstring''' lowerCAmelCase = factorial(__A ) lowerCAmelCase = split_and_add(__A ) return result if __name__ == "__main__": print(solution(int(input("""Enter the Number: """).strip())))
715
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
0
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def update_area_of_max_square(_snake_case , _snake_case ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 lowerCAmelCase = update_area_of_max_square(_lowerCamelCase , col + 1 ) lowerCAmelCase = update_area_of_max_square(row + 1 , col + 1 ) lowerCAmelCase = update_area_of_max_square(row + 1 , _lowerCamelCase ) if mat[row][col]: lowerCAmelCase = 1 + min([right, diagonal, down] ) lowerCAmelCase = max(largest_square_area[0] , _lowerCamelCase ) return sub_problem_sol else: return 0 lowerCAmelCase = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def update_area_of_max_square_using_dp_array( _snake_case , _snake_case , _snake_case ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] lowerCAmelCase = update_area_of_max_square_using_dp_array(_lowerCamelCase , col + 1 , _lowerCamelCase ) lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _lowerCamelCase ) lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , _lowerCamelCase , _lowerCamelCase ) if mat[row][col]: lowerCAmelCase = 1 + min([right, diagonal, down] ) lowerCAmelCase = max(largest_square_area[0] , _lowerCamelCase ) lowerCAmelCase = sub_problem_sol return sub_problem_sol else: return 0 lowerCAmelCase = [0] lowerCAmelCase = [[-1] * cols for _ in range(_lowerCamelCase )] update_area_of_max_square_using_dp_array(0 , 0 , _lowerCamelCase ) return largest_square_area[0] def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [[0] * (cols + 1) for _ in range(rows + 1 )] lowerCAmelCase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): lowerCAmelCase = dp_array[row][col + 1] lowerCAmelCase = dp_array[row + 1][col + 1] lowerCAmelCase = dp_array[row + 1][col] if mat[row][col] == 1: lowerCAmelCase = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCAmelCase = max(dp_array[row][col] , _lowerCamelCase ) else: lowerCAmelCase = 0 return largest_square_area def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * (cols + 1) lowerCAmelCase = [0] * (cols + 1) lowerCAmelCase = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): lowerCAmelCase = current_row[col + 1] lowerCAmelCase = next_row[col + 1] lowerCAmelCase = next_row[col] if mat[row][col] == 1: lowerCAmelCase = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) lowerCAmelCase = max(current_row[col] , _lowerCamelCase ) else: lowerCAmelCase = 0 lowerCAmelCase = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
716
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
0
from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) # TODO Update this UpperCAmelCase_ ={ """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class __UpperCamelCase ( _A ): '''simple docstring''' __a : int ="""esm""" def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=10_26 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_="absolute" , UpperCAmelCase_=True , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(pad_token_id=__lowerCamelCase , mask_token_id=__lowerCamelCase , **__lowerCamelCase ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = emb_layer_norm_before lowerCAmelCase = token_dropout lowerCAmelCase = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) lowerCAmelCase = EsmFoldConfig() elif isinstance(__lowerCamelCase , __lowerCamelCase ): lowerCAmelCase = EsmFoldConfig(**__lowerCamelCase ) lowerCAmelCase = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) lowerCAmelCase = get_default_vocab_list() else: lowerCAmelCase = vocab_list else: lowerCAmelCase = None lowerCAmelCase = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , __lowerCamelCase ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def __snake_case ( self ): lowerCAmelCase = super().to_dict() if isinstance(self.esmfold_config , __lowerCamelCase ): lowerCAmelCase = self.esmfold_config.to_dict() return output @dataclass class __UpperCamelCase : '''simple docstring''' __a : Any =None __a : int =True __a : Any =False __a : Optional[int] =False __a : int =False __a : int =0 __a : Optional[Any] =True __a : Union[str, Any] =False __a : Optional[Any] =1_2_8 __a : int =None def __snake_case ( self ): if self.trunk is None: lowerCAmelCase = TrunkConfig() elif isinstance(self.trunk , __lowerCamelCase ): lowerCAmelCase = TrunkConfig(**self.trunk ) def __snake_case ( self ): lowerCAmelCase = asdict(self ) lowerCAmelCase = self.trunk.to_dict() return output @dataclass class __UpperCamelCase : '''simple docstring''' __a : Union[str, Any] =4_8 __a : List[Any] =1_0_2_4 __a : List[Any] =1_2_8 __a : List[Any] =3_2 __a : int =3_2 __a : Optional[int] =3_2 __a : str =0 __a : Any =0 __a : Optional[Any] =False __a : Union[str, Any] =4 __a : Dict =1_2_8 __a : Optional[int] =None def __snake_case ( self ): if self.structure_module is None: lowerCAmelCase = StructureModuleConfig() elif isinstance(self.structure_module , __lowerCamelCase ): lowerCAmelCase = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) lowerCAmelCase = self.sequence_state_dim // self.sequence_head_width lowerCAmelCase = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def __snake_case ( self ): lowerCAmelCase = asdict(self ) lowerCAmelCase = self.structure_module.to_dict() return output @dataclass class __UpperCamelCase : '''simple docstring''' __a : Optional[int] =3_8_4 __a : Optional[int] =1_2_8 __a : List[Any] =1_6 __a : str =1_2_8 __a : Optional[int] =1_2 __a : Union[str, Any] =4 __a : List[str] =8 __a : int =0.1 __a : int =8 __a : Optional[int] =1 __a : Optional[int] =2 __a : Optional[Any] =7 __a : List[str] =1_0 __a : List[str] =1e-8 __a : Tuple =1e5 def __snake_case ( self ): return asdict(self ) def UpperCAmelCase ( ): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
717
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( _snake_case = 3 ): if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' ) lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' ) lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case ) lowerCAmelCase = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots lowerCAmelCase = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
33
0
import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case ): lowerCAmelCase = FunnelConfig.from_json_file(a_ ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase = FunnelBaseModel(a_ ) if base_model else FunnelModel(a_ ) # Load weights from tf checkpoint load_tf_weights_in_funnel(a_ , a_ , a_ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , a_ ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not.""" ) UpperCAmelCase_ =parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
718
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Any =1 @register_to_config def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ): lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowerCAmelCase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) lowerCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): lowerCAmelCase = std.unsqueeze(-1 ) lowerCAmelCase = -score / std # compute lowerCAmelCase = -1.0 / len(self.timesteps ) lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowerCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): lowerCAmelCase = beta_t.unsqueeze(-1 ) lowerCAmelCase = -0.5 * beta_t * x lowerCAmelCase = torch.sqrt(UpperCAmelCase_ ) lowerCAmelCase = drift - diffusion**2 * score lowerCAmelCase = x + drift * dt # add noise lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype ) lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ): return self.config.num_train_timesteps
33
0
def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = '''''' for i in table: res += inp[i - 1] return res def UpperCAmelCase ( _snake_case ): return data[1:] + data[0] def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = '''''' for i in range(len(_A ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = int('''0b''' + data[0] + data[-1] , 2 ) lowerCAmelCase = int('''0b''' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): lowerCAmelCase = message[:4] lowerCAmelCase = message[4:] lowerCAmelCase = apply_table(_A , _A ) lowerCAmelCase = xor(_A , _A ) lowerCAmelCase = apply_sbox(_A , temp[:4] ) # noqa: E741 lowerCAmelCase = apply_sbox(_A , temp[4:] ) lowerCAmelCase = '''0''' * (2 - len(_A )) + l # noqa: E741 lowerCAmelCase = '''0''' * (2 - len(_A )) + r lowerCAmelCase = apply_table(l + r , _A ) lowerCAmelCase = xor(_A , _A ) return temp + right if __name__ == "__main__": UpperCAmelCase_ =input("""Enter 10 bit key: """) UpperCAmelCase_ =input("""Enter 8 bit message: """) UpperCAmelCase_ =[6, 3, 7, 4, 8, 5, 10, 9] UpperCAmelCase_ =[3, 5, 2, 7, 4, 10, 1, 9, 8, 6] UpperCAmelCase_ =[2, 4, 3, 1] UpperCAmelCase_ =[2, 6, 3, 1, 4, 8, 5, 7] UpperCAmelCase_ =[4, 1, 3, 5, 7, 2, 8, 6] UpperCAmelCase_ =[4, 1, 2, 3, 2, 3, 4, 1] UpperCAmelCase_ =[[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] UpperCAmelCase_ =[[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation UpperCAmelCase_ =apply_table(key, paa_table) UpperCAmelCase_ =temp[:5] UpperCAmelCase_ =temp[5:] UpperCAmelCase_ =left_shift(left) UpperCAmelCase_ =left_shift(right) UpperCAmelCase_ =apply_table(left + right, pa_table) UpperCAmelCase_ =left_shift(left) UpperCAmelCase_ =left_shift(right) UpperCAmelCase_ =left_shift(left) UpperCAmelCase_ =left_shift(right) UpperCAmelCase_ =apply_table(left + right, pa_table) # encryption UpperCAmelCase_ =apply_table(message, IP) UpperCAmelCase_ =function(expansion, sa, sa, keya, temp) UpperCAmelCase_ =temp[4:] + temp[:4] UpperCAmelCase_ =function(expansion, sa, sa, keya, temp) UpperCAmelCase_ =apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption UpperCAmelCase_ =apply_table(CT, IP) UpperCAmelCase_ =function(expansion, sa, sa, keya, temp) UpperCAmelCase_ =temp[4:] + temp[:4] UpperCAmelCase_ =function(expansion, sa, sa, keya, temp) UpperCAmelCase_ =apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
719
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __UpperCamelCase ( yaml.SafeLoader ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys] lowerCAmelCase = Counter(UpperCAmelCase_ ) lowerCAmelCase = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ): lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ ) self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ ) return mapping def UpperCAmelCase ( _snake_case ): lowerCAmelCase = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase = full_content[1:].index('''---''' ) + 1 lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_snake_case ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __snake_case ( cls , UpperCAmelCase_ ): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase_ ) else: return cls() def __snake_case ( self , UpperCAmelCase_ ): if path.exists(): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase = readme_file.read() else: lowerCAmelCase = None lowerCAmelCase = self._to_readme(UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ = None ): if readme_content is not None: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ ) lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def __snake_case ( cls , UpperCAmelCase_ ): lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase_ ) def __snake_case ( self ): return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' ) UpperCAmelCase_ ={ """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") UpperCAmelCase_ =ap.parse_args() UpperCAmelCase_ =Path(args.readme_filepath) UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
33
0
def UpperCAmelCase ( _snake_case , _snake_case ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
720
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 ) lowerCAmelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): for example in examples: lowerCAmelCase = video_classifier(UpperCAmelCase_ ) self.assertEqual( UpperCAmelCase_ , [ {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, ] , ) @require_torch def __snake_case ( self ): lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowerCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowerCAmelCase = pipeline( '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 ) lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) lowerCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __snake_case ( self ): pass
33
0
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' __a : int =4_2 __a : Union[str, Any] =None def UpperCAmelCase ( _snake_case , _snake_case=0.999 , _snake_case="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowerCAmelCase = [] for i in range(__UpperCamelCase ): lowerCAmelCase = i / num_diffusion_timesteps lowerCAmelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) , __UpperCamelCase ) ) return torch.tensor(__UpperCamelCase , dtype=torch.floataa ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' @register_to_config def __init__( self , UpperCAmelCase_ = 10_00 , UpperCAmelCase_ = "fixed_small_log" , UpperCAmelCase_ = True , UpperCAmelCase_ = 1.0 , UpperCAmelCase_ = "epsilon" , UpperCAmelCase_ = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowerCAmelCase = betas_for_alpha_bar(_lowercase ) lowerCAmelCase = 1.0 - self.betas lowerCAmelCase = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase = 1.0 # setable values lowerCAmelCase = None lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowercase )[::-1].copy() ) lowerCAmelCase = variance_type def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): return sample def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = num_inference_steps lowerCAmelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase = (np.arange(0 , _lowercase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase = torch.from_numpy(_lowercase ).to(_lowercase ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ): if prev_timestep is None: lowerCAmelCase = t - 1 lowerCAmelCase = self.alphas_cumprod[t] lowerCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase = 1 - alpha_prod_t lowerCAmelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase = self.betas[t] else: lowerCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowerCAmelCase = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowerCAmelCase = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase = torch.log(torch.clamp(_lowercase , min=1E-2_0 ) ) lowerCAmelCase = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase = variance.log() lowerCAmelCase = beta.log() lowerCAmelCase = (predicted_variance + 1) / 2 lowerCAmelCase = frac * max_log + (1 - frac) * min_log return variance def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_=None , UpperCAmelCase_ = True , ): lowerCAmelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase = torch.split(_lowercase , sample.shape[1] , dim=1 ) else: lowerCAmelCase = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase = t - 1 lowerCAmelCase = self.alphas_cumprod[t] lowerCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase = 1 - alpha_prod_t lowerCAmelCase = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase = self.betas[t] lowerCAmelCase = self.alphas[t] else: lowerCAmelCase = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase = torch.clamp( _lowercase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise lowerCAmelCase = 0 if t > 0: lowerCAmelCase = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=_lowercase , device=model_output.device ) lowerCAmelCase = self._get_variance( _lowercase , predicted_variance=_lowercase , prev_timestep=_lowercase , ) if self.variance_type == "fixed_small_log": lowerCAmelCase = variance elif self.variance_type == "learned_range": lowerCAmelCase = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" ''' for the UnCLIPScheduler.''' ) lowerCAmelCase = variance * variance_noise lowerCAmelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=_lowercase , pred_original_sample=_lowercase ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ): lowerCAmelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase = timesteps.to(original_samples.device ) lowerCAmelCase = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
721
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def __snake_case ( self , UpperCAmelCase_=0 ): lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) ) lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) # warmup pass to apply optimizations lowerCAmelCase = pipe(**self.get_dummy_inputs() ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def __snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __snake_case ( self ): lowerCAmelCase = ort.SessionOptions() lowerCAmelCase = False return options def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
0
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
700
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( _snake_case ): lowerCAmelCase = args.pruning_method lowerCAmelCase = args.threshold lowerCAmelCase = args.model_name_or_path.rstrip('''/''' ) lowerCAmelCase = args.target_model_path print(F"""Load fine-pruned model from {model_name_or_path}""" ) lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) ) lowerCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "bias" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase , lowerCAmelCase = -0.1, 1.1 lowerCAmelCase = torch.sigmoid(_snake_case ) lowerCAmelCase = s * (r - l) + l lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: lowerCAmelCase = os.path.join( os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" ) if not os.path.isdir(_snake_case ): shutil.copytree(_snake_case , _snake_case ) print(F"""\nCreated folder {target_model_path}""" ) torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) UpperCAmelCase_ =parser.parse_args() main(args)
33
0
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def UpperCAmelCase ( _snake_case , _snake_case=() , _snake_case=None , _snake_case="no" , _snake_case="29500" ): lowerCAmelCase = False lowerCAmelCase = False if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ): lowerCAmelCase = True elif "IPython" in sys.modules: lowerCAmelCase = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() ) try: lowerCAmelCase = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , __SCREAMING_SNAKE_CASE ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ''' '''your training function. Restart your notebook and make sure no cells initializes an ''' '''`Accelerator`.''' ) if num_processes is None: lowerCAmelCase = 8 lowerCAmelCase = PrepareForLaunch(__SCREAMING_SNAKE_CASE , distributed_type='''TPU''' ) print(F"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , nprocs=__SCREAMING_SNAKE_CASE , start_method='''fork''' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on one CPU.''' ) function(*__SCREAMING_SNAKE_CASE ) else: if num_processes is None: raise ValueError( '''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( '''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ''' '''inside your training function. Restart your notebook and make sure no cells initializes an ''' '''`Accelerator`.''' ) if torch.cuda.is_initialized(): raise ValueError( '''To launch a multi-GPU training from your notebook, you need to avoid running any instruction ''' '''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ''' '''function.''' ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__SCREAMING_SNAKE_CASE , master_addr='''127.0.01''' , master_port=__SCREAMING_SNAKE_CASE , mixed_precision=__SCREAMING_SNAKE_CASE ): lowerCAmelCase = PrepareForLaunch(__SCREAMING_SNAKE_CASE , distributed_type='''MULTI_GPU''' ) print(F"""Launching training on {num_processes} GPUs.""" ) try: start_processes(__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , nprocs=__SCREAMING_SNAKE_CASE , start_method='''fork''' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( '''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ''' '''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. ''' '''Please review your imports and test them when running the `notebook_launcher()` to identify ''' '''which one is problematic.''' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): lowerCAmelCase = '''1''' print('''Launching training on MPS.''' ) elif torch.cuda.is_available(): print('''Launching training on one GPU.''' ) else: print('''Launching training on CPU.''' ) function(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase ( _snake_case , _snake_case=() , _snake_case=2 ): from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__SCREAMING_SNAKE_CASE , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ): lowerCAmelCase = PrepareForLaunch(__SCREAMING_SNAKE_CASE , debug=__SCREAMING_SNAKE_CASE ) start_processes(__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , nprocs=__SCREAMING_SNAKE_CASE , start_method='''fork''' )
701
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } UpperCAmelCase_ ={ """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } UpperCAmelCase_ ={ """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def UpperCAmelCase ( _snake_case ): lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char lowerCAmelCase = set(_snake_case ) return pairs class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ): super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = merges_file lowerCAmelCase = {} lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 self.add_from_file(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self ): return len(self.encoder ) def __snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.cache: return self.cache[token] lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 while i < len(UpperCAmelCase_ ): try: lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = new_word if len(UpperCAmelCase_ ) == 1: break else: lowerCAmelCase = get_pairs(UpperCAmelCase_ ) lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ ) lowerCAmelCase = word[:-4] lowerCAmelCase = word return word def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [] lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) ) return split_tokens def __snake_case ( self , UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.merges_file , UpperCAmelCase_ ) return out_vocab_file, out_merge_file def __snake_case ( self , UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): try: with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" ) return lowerCAmelCase = f.readlines() for lineTmp in lines: lowerCAmelCase = lineTmp.strip() lowerCAmelCase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase = line[:idx] lowerCAmelCase = len(self.encoder )
33
0
from __future__ import annotations from cmath import sqrt def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): if a == 0: raise ValueError('''Coefficient \'a\' must not be zero.''' ) lowerCAmelCase = b * b - 4 * a * c lowerCAmelCase = (-b + sqrt(_snake_case )) / (2 * a) lowerCAmelCase = (-b - sqrt(_snake_case )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def UpperCAmelCase ( ): lowerCAmelCase , lowerCAmelCase = quadratic_roots(a=5 , b=6 , c=1 ) print(F"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
702
from __future__ import annotations from typing import Generic, TypeVar UpperCAmelCase_ =TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = self lowerCAmelCase = 0 class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # map from node name to the node object lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # create a new set with x as its member lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): # find the set x belongs to (with path-compression) lowerCAmelCase = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase = nodea else: lowerCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # merge 2 disjoint sets self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) ) class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): # add an edge with the given weight self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase = weight lowerCAmelCase = weight def __snake_case ( self ): lowerCAmelCase = [] lowerCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda UpperCAmelCase_ : x[2] ) # creating the disjoint set lowerCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(UpperCAmelCase_ ) # MST generation lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index] index += 1 lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ ) return graph
33
0
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCAmelCase_ =16 UpperCAmelCase_ =32 def UpperCAmelCase ( _snake_case , _snake_case = 16 , _snake_case = "bert-base-cased" ): lowerCAmelCase = AutoTokenizer.from_pretrained(snake_case_ ) lowerCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_snake_case ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_snake_case ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowerCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) lowerCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def UpperCAmelCase ( _snake_case , _snake_case ): # Initialize accelerator lowerCAmelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase = config["lr"] lowerCAmelCase = int(config['''num_epochs'''] ) lowerCAmelCase = int(config['''seed'''] ) lowerCAmelCase = int(config['''batch_size'''] ) lowerCAmelCase = args.model_name_or_path set_seed(snake_case_ ) lowerCAmelCase = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer lowerCAmelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: lowerCAmelCase = 1 lowerCAmelCase = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: lowerCAmelCase = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over lowerCAmelCase = 0 # We also need to keep track of the stating epoch so files are named properly lowerCAmelCase = 0 # Now we train the model lowerCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) lowerCAmelCase = 0 lowerCAmelCase = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): lowerCAmelCase = model(**snake_case_ ) lowerCAmelCase = outputs.loss lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowerCAmelCase = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase = model(**snake_case_ ) lowerCAmelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCAmelCase = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , snake_case_ ) lowerCAmelCase = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: lowerCAmelCase = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def UpperCAmelCase ( ): lowerCAmelCase = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=snake_case_ , default=snake_case_ , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=3 , help='''Number of train epochs.''' , ) lowerCAmelCase = parser.parse_args() lowerCAmelCase = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
703
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations(_snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(_snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations_with_dp_array( _snake_case , _snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , _snake_case ) for item in array ) lowerCAmelCase = answer return answer lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(_snake_case , _snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * (target + 1) lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(_snake_case ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ =3 UpperCAmelCase_ =5 UpperCAmelCase_ =[1, 2, 5] print(combination_sum_iv(n, array, target))
33
0
from math import factorial def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): if successes > trials: raise ValueError('''successes must be lower or equal to trials''' ) if trials < 0 or successes < 0: raise ValueError('''the function is defined for non-negative integers''' ) if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError('''the function is defined for non-negative integers''' ) if not 0 < prob < 1: raise ValueError('''prob has to be in range of 1 - 0''' ) lowerCAmelCase = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! lowerCAmelCase = float(factorial(_lowerCamelCase ) ) coefficient /= factorial(_lowerCamelCase ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("""Probability of 2 successes out of 4 trails""") print("""with probability of 0.75 is:""", end=""" """) print(binomial_distribution(2, 4, 0.75))
704
import torch from diffusers import StableDiffusionPipeline UpperCAmelCase_ ="""path-to-your-trained-model""" UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") UpperCAmelCase_ ="""A photo of sks dog in a bucket""" UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
33
0
'''simple docstring''' from __future__ import annotations import time import numpy as np UpperCAmelCase_ =[8, 5, 9, 7] UpperCAmelCase_ =[ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] UpperCAmelCase_ =[ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ): lowerCAmelCase = claim_vector lowerCAmelCase = allocated_resources_table lowerCAmelCase = maximum_claim_table def __snake_case ( self ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def __snake_case ( self ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def __snake_case ( self ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def __snake_case ( self ): return {self.__need().index(_a ): i for i in self.__need()} def __snake_case ( self , **UpperCAmelCase_ ): lowerCAmelCase = self.__need() lowerCAmelCase = self.__allocated_resources_table lowerCAmelCase = self.__available_resources() lowerCAmelCase = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: lowerCAmelCase = False for each_need in need_list: lowerCAmelCase = True for index, need in enumerate(_a ): if need > available_resources[index]: lowerCAmelCase = False break if execution: lowerCAmelCase = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowerCAmelCase = original_need_index print(F"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(_a ) # update available/freed resources stack lowerCAmelCase = np.array(_a ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(_a ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def __snake_case ( self ): print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( F"""P{self.__allocated_resources_table.index(_a ) + 1}""" + ''' '''.join(F"""{it:>8}""" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( F"""P{self.__maximum_claim_table.index(_a ) + 1}""" + ''' '''.join(F"""{it:>8}""" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(_a ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(_a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
705
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
0
from math import ceil def UpperCAmelCase ( _snake_case = 1001 ): lowerCAmelCase = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): lowerCAmelCase = 2 * i + 1 lowerCAmelCase = 2 * i lowerCAmelCase = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: UpperCAmelCase_ =int(sys.argv[1]) print(solution(n)) except ValueError: print("""Invalid entry - please enter a number""")
706
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
0
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase_ =pytest.mark.integration @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(__lowerCAmelCase ) for x in np.arange(30 ).tolist()]} ) return dset def __snake_case ( self ): import faiss lowerCAmelCase = self._create_dummy_dataset() lowerCAmelCase = dset.map( lambda UpperCAmelCase_ , UpperCAmelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ) lowerCAmelCase = dset.add_faiss_index('''vecs''' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase , lowerCAmelCase = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) dset.drop_index('''vecs''' ) def __snake_case ( self ): import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) lowerCAmelCase , lowerCAmelCase = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) def __snake_case ( self ): import faiss lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file: dset.save_faiss_index('''vecs''' , tmp_file.name ) dset.load_faiss_index('''vecs2''' , tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase , lowerCAmelCase = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) def __snake_case ( self ): lowerCAmelCase = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' ) dset.drop_index('''vecs''' ) self.assertRaises(__lowerCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) ) def __snake_case ( self ): from elasticsearch import Elasticsearch lowerCAmelCase = self._create_dummy_dataset() with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: lowerCAmelCase = {'''acknowledged''': True} mocked_bulk.return_value([(True, None)] * 30 ) lowerCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}} lowerCAmelCase = Elasticsearch() dset.add_elasticsearch_index('''filename''' , es_client=__lowerCAmelCase ) lowerCAmelCase , lowerCAmelCase = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase , lowerCAmelCase = index.search(__lowerCAmelCase ) self.assertRaises(__lowerCAmelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1] lowerCAmelCase , lowerCAmelCase = index.search_batch(__lowerCAmelCase ) self.assertRaises(__lowerCAmelCase , index.search_batch , queries[0] ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCAmelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __lowerCAmelCase ) def __snake_case ( self ): import faiss lowerCAmelCase = FaissIndex(string_factory='''Flat''' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) lowerCAmelCase = FaissIndex(string_factory='''LSH''' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__lowerCAmelCase ): lowerCAmelCase = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) ) def __snake_case ( self ): import faiss lowerCAmelCase = faiss.IndexFlat(5 ) lowerCAmelCase = FaissIndex(custom_index=__lowerCAmelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __snake_case ( self ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file: index.save(tmp_file.name ) lowerCAmelCase = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase , lowerCAmelCase = index.search(__lowerCAmelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def UpperCAmelCase ( _snake_case ): import faiss lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) lowerCAmelCase = '''index.faiss''' lowerCAmelCase = F"""mock://{index_name}""" index.save(__snake_case , storage_options=mockfs.storage_options ) lowerCAmelCase = FaissIndex.load(__snake_case , storage_options=mockfs.storage_options ) lowerCAmelCase = np.zeros(5 , dtype=np.floataa ) lowerCAmelCase = 1 lowerCAmelCase , lowerCAmelCase = index.search(__snake_case ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): from elasticsearch import Elasticsearch with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: lowerCAmelCase = Elasticsearch() lowerCAmelCase = {'''acknowledged''': True} lowerCAmelCase = ElasticSearchIndex(es_client=__lowerCAmelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['''foo''', '''bar''', '''foobar'''] ) # single query lowerCAmelCase = '''foo''' lowerCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} lowerCAmelCase , lowerCAmelCase = index.search(__lowerCAmelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout lowerCAmelCase = '''foo''' lowerCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} lowerCAmelCase , lowerCAmelCase = index.search(__lowerCAmelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries lowerCAmelCase = ['''foo''', '''bar''', '''foobar'''] lowerCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} lowerCAmelCase , lowerCAmelCase = index.search_batch(__lowerCAmelCase ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCAmelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCAmelCase ) # batched queries with timeout lowerCAmelCase = ['''foo''', '''bar''', '''foobar'''] lowerCAmelCase = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} lowerCAmelCase , lowerCAmelCase = index.search_batch(__lowerCAmelCase , request_timeout=30 ) lowerCAmelCase = [scores[0] for scores in total_scores] lowerCAmelCase = [indices[0] for indices in total_indices] self.assertGreater(np.min(__lowerCAmelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
707
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ="""switch_transformers""" __a : Union[str, Any] =["""past_key_values"""] __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = d_kv lowerCAmelCase = d_ff lowerCAmelCase = num_sparse_encoder_layers lowerCAmelCase = num_layers lowerCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase = num_heads lowerCAmelCase = num_experts lowerCAmelCase = expert_capacity lowerCAmelCase = router_bias lowerCAmelCase = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) lowerCAmelCase = router_dtype lowerCAmelCase = router_ignore_padding_tokens lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = relative_attention_max_distance lowerCAmelCase = dropout_rate lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_factor lowerCAmelCase = feed_forward_proj lowerCAmelCase = use_cache lowerCAmelCase = add_router_probs lowerCAmelCase = router_z_loss_coef lowerCAmelCase = router_aux_loss_coef lowerCAmelCase = self.feed_forward_proj.split('''-''' ) lowerCAmelCase = act_info[-1] lowerCAmelCase = act_info[0] == '''gated''' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device UpperCAmelCase_ =False class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self ): lowerCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) # remove text_unet pipe.remove_unused_weights() pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase = """A painting of a squirrel eating a burger """ lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase_ ) lowerCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained(lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase = generator.manual_seed(0 ) lowerCAmelCase = pipe( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __snake_case ( self ): lowerCAmelCase = VersatileDiffusionTextToImagePipeline.from_pretrained( '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) lowerCAmelCase = """A painting of a squirrel eating a burger """ lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe( prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images lowerCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
708
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
0
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = jnp.ones((batch_size, length) ) / length return scores def __snake_case ( self ): lowerCAmelCase = None lowerCAmelCase = 20 lowerCAmelCase = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__ ) # tweak scores to not be uniform anymore lowerCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch lowerCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax lowerCAmelCase = jax.nn.softmax(lowerCAmelCase__ , axis=-1 ) lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 ) lowerCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__ ) , axis=-1 ) lowerCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def __snake_case ( self ): lowerCAmelCase = None lowerCAmelCase = 10 lowerCAmelCase = 2 # create ramp distribution lowerCAmelCase = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() lowerCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size lowerCAmelCase = FlaxTopKLogitsWarper(3 ) lowerCAmelCase = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case lowerCAmelCase = 5 lowerCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) lowerCAmelCase = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, length) ).copy() lowerCAmelCase = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def __snake_case ( self ): lowerCAmelCase = None lowerCAmelCase = 10 lowerCAmelCase = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) lowerCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) lowerCAmelCase = FlaxTopPLogitsWarper(0.8 ) lowerCAmelCase = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 lowerCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # check edge cases with negative and extreme logits lowerCAmelCase = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme lowerCAmelCase = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept lowerCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) lowerCAmelCase = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def __snake_case ( self ): lowerCAmelCase = 20 lowerCAmelCase = 4 lowerCAmelCase = 0 lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__ ) # check that min length is applied at length 5 lowerCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 ) lowerCAmelCase = 5 lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = 15 lowerCAmelCase = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() ) def __snake_case ( self ): lowerCAmelCase = 20 lowerCAmelCase = 4 lowerCAmelCase = 0 lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ ) # check that all scores are -inf except the bos_token_id score lowerCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 ) lowerCAmelCase = 1 lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 lowerCAmelCase = 3 lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() ) def __snake_case ( self ): lowerCAmelCase = 20 lowerCAmelCase = 4 lowerCAmelCase = 0 lowerCAmelCase = 5 lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) # check that all scores are -inf except the eos_token_id when max_length is reached lowerCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 ) lowerCAmelCase = 4 lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached lowerCAmelCase = 3 lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() ) def __snake_case ( self ): lowerCAmelCase = 4 lowerCAmelCase = 10 lowerCAmelCase = 15 lowerCAmelCase = 2 lowerCAmelCase = 1 lowerCAmelCase = 15 # dummy input_ids and scores lowerCAmelCase = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__ ) lowerCAmelCase = input_ids.copy() lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = scores.copy() # instantiate all dist processors lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase = FlaxTopKLogitsWarper(3 ) lowerCAmelCase = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__ ) lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ ) lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) lowerCAmelCase = 10 # no processor list lowerCAmelCase = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # with processor list lowerCAmelCase = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCAmelCase = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def __snake_case ( self ): lowerCAmelCase = 4 lowerCAmelCase = 10 lowerCAmelCase = 15 lowerCAmelCase = 2 lowerCAmelCase = 1 lowerCAmelCase = 15 # dummy input_ids and scores lowerCAmelCase = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__ ) lowerCAmelCase = input_ids.copy() lowerCAmelCase = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = scores.copy() # instantiate all dist processors lowerCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) lowerCAmelCase = FlaxTopKLogitsWarper(3 ) lowerCAmelCase = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors lowerCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__ ) lowerCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ ) lowerCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) lowerCAmelCase = 10 # no processor list def run_no_processor_list(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) lowerCAmelCase = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) return scores # with processor list def run_processor_list(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) lowerCAmelCase = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__ ) return scores lowerCAmelCase = jax.jit(lowerCAmelCase__ ) lowerCAmelCase = jax.jit(lowerCAmelCase__ ) lowerCAmelCase = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
709
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Tuple =IFInpaintingSuperResolutionPipeline __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""} def __snake_case ( self ): return self._get_superresolution_dummy_components() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __snake_case ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __snake_case ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __snake_case ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __snake_case ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __snake_case ( self ): self._test_save_load_local() def __snake_case ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
33
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase_ ={ '''configuration_chinese_clip''': [ '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ChineseCLIPConfig''', '''ChineseCLIPOnnxConfig''', '''ChineseCLIPTextConfig''', '''ChineseCLIPVisionConfig''', ], '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =['''ChineseCLIPFeatureExtractor'''] UpperCAmelCase_ =['''ChineseCLIPImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ChineseCLIPModel''', '''ChineseCLIPPreTrainedModel''', '''ChineseCLIPTextModel''', '''ChineseCLIPVisionModel''', ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
710
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ ={ """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ 'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json', } class __UpperCamelCase ( UpperCamelCase_ ): '''simple docstring''' __a : Tuple ="""data2vec-text""" def __init__( self , UpperCAmelCase_=3_05_22 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=5_12 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=1 , UpperCAmelCase_=0 , UpperCAmelCase_=2 , UpperCAmelCase_="absolute" , UpperCAmelCase_=True , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = classifier_dropout class __UpperCamelCase ( UpperCamelCase_ ): '''simple docstring''' @property def __snake_case ( self ): if self.task == "multiple-choice": lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
711
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): '''simple docstring''' __a : Optional[datasets.Features] =None __a : str ="utf-8" __a : Optional[str] =None __a : Optional[str] =None __a : bool =True # deprecated __a : Optional[int] =None # deprecated __a : int =1_0 << 2_0 # 10MB __a : Optional[bool] =None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __a : str =JsonConfig def __snake_case ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) lowerCAmelCase = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self , UpperCAmelCase_ ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase_ , (str, list, tuple) ): lowerCAmelCase = data_files if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self , UpperCAmelCase_ ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema ) return pa_table def __snake_case ( self , UpperCAmelCase_ ): for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) # We keep only the field we are interested in lowerCAmelCase = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase_ , (list, tuple) ): lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} else: lowerCAmelCase = dataset lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) yield file_idx, self._cast_table(UpperCAmelCase_ ) # If the file has one json object per line else: with open(UpperCAmelCase_ , '''rb''' ) as f: lowerCAmelCase = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 ) lowerCAmelCase = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: lowerCAmelCase = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase_ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' ) try: while True: try: lowerCAmelCase = paj.read_json( io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase_ , pa.ArrowInvalid ) and "straddling" not in str(UpperCAmelCase_ ) or block_size > len(UpperCAmelCase_ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON try: lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(UpperCAmelCase_ ) break else: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ ) batch_idx += 1
33
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""", """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""", """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Dict ="""big_bird""" def __init__( self , UpperCAmelCase_=5_03_58 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu_new" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=40_96 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=66 , UpperCAmelCase_="block_sparse" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=64 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , sep_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) lowerCAmelCase = vocab_size lowerCAmelCase = max_position_embeddings lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = initializer_range lowerCAmelCase = type_vocab_size lowerCAmelCase = layer_norm_eps lowerCAmelCase = use_cache lowerCAmelCase = rescale_embeddings lowerCAmelCase = attention_type lowerCAmelCase = use_bias lowerCAmelCase = block_size lowerCAmelCase = num_random_blocks lowerCAmelCase = classifier_dropout class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' @property def __snake_case ( self ): if self.task == "multiple-choice": lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
712
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""maskformer-swin""" __a : Optional[int] ={ """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ ) lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = len(UpperCAmelCase_ ) lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) ) lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )] lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
0
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : '''simple docstring''' @staticmethod def __snake_case ( *UpperCAmelCase_ , **UpperCAmelCase_ ): pass @is_pipeline_test @require_vision @require_torch class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCAmelCase = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = object_detector(examples[0] , threshold=0.0 ) lowerCAmelCase = len(UpperCAmelCase_ ) self.assertGreater(UpperCAmelCase_ , 0 ) self.assertEqual( UpperCAmelCase_ , [ { '''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ ), '''box''': {'''xmin''': ANY(UpperCAmelCase_ ), '''ymin''': ANY(UpperCAmelCase_ ), '''xmax''': ANY(UpperCAmelCase_ ), '''ymax''': ANY(UpperCAmelCase_ )}, } for i in range(UpperCAmelCase_ ) ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def __snake_case ( self ): pass @require_torch def __snake_case ( self ): lowerCAmelCase = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) lowerCAmelCase = object_detector( '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, {'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}}, {'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, ] , ) lowerCAmelCase = object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {'''score''': 0.7235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.6748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, {'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}}, {'''score''': 0.6419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, ] ] , ) @require_torch @slow def __snake_case ( self ): lowerCAmelCase = pipeline('''zero-shot-object-detection''' ) lowerCAmelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ] , ) lowerCAmelCase = object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ] , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ], [ {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ], ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def __snake_case ( self ): pass @require_torch @slow def __snake_case ( self ): lowerCAmelCase = 0.2 lowerCAmelCase = pipeline('''zero-shot-object-detection''' ) lowerCAmelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=UpperCAmelCase_ , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, ] , ) @require_torch @slow def __snake_case ( self ): lowerCAmelCase = 2 lowerCAmelCase = pipeline('''zero-shot-object-detection''' ) lowerCAmelCase = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=UpperCAmelCase_ , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {'''score''': 0.2868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, ] , )
713
from collections.abc import Sequence def UpperCAmelCase ( _snake_case , _snake_case = False ): if not arr: return 0 lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' ) lowerCAmelCase = 0.0 for num in arr: lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num ) lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
33
0
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , unittest.TestCase ): '''simple docstring''' __a : Tuple =DanceDiffusionPipeline __a : Any =UNCONDITIONAL_AUDIO_GENERATION_PARAMS __a : Optional[Any] =PipelineTesterMixin.required_optional_params - { """callback""", """latents""", """callback_steps""", """output_type""", """num_images_per_prompt""", } __a : str =UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS __a : Optional[Any] =False __a : List[Any] =False def __snake_case ( self ): torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowercase , use_timestep_embedding=__lowercase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , ) lowerCAmelCase = IPNDMScheduler() lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(__lowercase ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(__lowercase ) else: lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) lowerCAmelCase = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 4, } return inputs def __snake_case ( self ): lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = DanceDiffusionPipeline(**__lowercase ) lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase = self.get_dummy_inputs(__lowercase ) lowerCAmelCase = pipe(**__lowercase ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) lowerCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def __snake_case ( self ): return super().test_save_load_local() @skip_mps def __snake_case ( self ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def __snake_case ( self ): return super().test_save_load_optional_components() @skip_mps def __snake_case ( self ): return super().test_attention_slicing_forward_pass() def __snake_case ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self ): lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ) lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=__lowercase , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def __snake_case ( self ): lowerCAmelCase = torch_device lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa ) lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(generator=__lowercase , num_inference_steps=1_00 , audio_length_in_s=4.096 ) lowerCAmelCase = output.audios lowerCAmelCase = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) lowerCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
714
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
0
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( __snake_case ): '''simple docstring''' __a : Optional[int] =['image_processor', 'tokenizer'] __a : Tuple ='BlipImageProcessor' __a : Optional[int] ='AutoTokenizer' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = False super().__init__(A_ , A_ ) lowerCAmelCase = self.image_processor def __call__( self , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ): if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowerCAmelCase = self.tokenizer lowerCAmelCase = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) return text_encoding # add pixel_values lowerCAmelCase = self.image_processor(A_ , return_tensors=A_ ) if text is not None: lowerCAmelCase = self.tokenizer( text=A_ , add_special_tokens=A_ , padding=A_ , truncation=A_ , max_length=A_ , stride=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , return_overflowing_tokens=A_ , return_special_tokens_mask=A_ , return_offsets_mapping=A_ , return_token_type_ids=A_ , return_length=A_ , verbose=A_ , return_tensors=A_ , **A_ , ) else: lowerCAmelCase = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): return self.tokenizer.batch_decode(*A_ , **A_ ) def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): return self.tokenizer.decode(*A_ , **A_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __snake_case ( self ): lowerCAmelCase = self.tokenizer.model_input_names lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
715
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
0
import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( UpperCamelCase__ ): '''simple docstring''' def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
716
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
0
from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
717
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def UpperCAmelCase ( _snake_case = 3 ): if isinstance(_snake_case , _snake_case ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_snake_case ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' ) lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' ) lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case ) lowerCAmelCase = number_of_qubits for i in range(_snake_case ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_snake_case ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_snake_case , _snake_case ) # simulate with 10000 shots lowerCAmelCase = Aer.get_backend('''qasm_simulator''' ) lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 ) return job.result().get_counts(_snake_case ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
33
0
from __future__ import annotations from PIL import Image # Define glider example UpperCAmelCase_ =[ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example UpperCAmelCase_ =[[0, 1, 0], [0, 1, 0], [0, 1, 0]] def UpperCAmelCase ( _snake_case ): lowerCAmelCase = [] for i in range(len(lowercase__ ) ): lowerCAmelCase = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowercase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowercase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowercase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowercase__ ) return next_generation def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = [] for _ in range(lowercase__ ): # Create output image lowerCAmelCase = Image.new('''RGB''' , (len(cells[0] ), len(lowercase__ )) ) lowerCAmelCase = img.load() # Save cells to image for x in range(len(lowercase__ ) ): for y in range(len(cells[0] ) ): lowerCAmelCase = 255 - cells[y][x] * 255 lowerCAmelCase = (colour, colour, colour) # Save image images.append(lowercase__ ) lowerCAmelCase = new_generation(lowercase__ ) return images if __name__ == "__main__": UpperCAmelCase_ =generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
718
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Any =1 @register_to_config def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ): lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score lowerCAmelCase = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) lowerCAmelCase = std.flatten() while len(std.shape ) < len(score.shape ): lowerCAmelCase = std.unsqueeze(-1 ) lowerCAmelCase = -score / std # compute lowerCAmelCase = -1.0 / len(self.timesteps ) lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) lowerCAmelCase = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): lowerCAmelCase = beta_t.unsqueeze(-1 ) lowerCAmelCase = -0.5 * beta_t * x lowerCAmelCase = torch.sqrt(UpperCAmelCase_ ) lowerCAmelCase = drift - diffusion**2 * score lowerCAmelCase = x + drift * dt # add noise lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype ) lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self ): return self.config.num_train_timesteps
33
0
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCAmelCase_ =logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase_ ='\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=8 ): lowerCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowerCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class __UpperCamelCase ( __lowercase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ): super().__init__() self.register_modules( unet=__a , scheduler=__a , movq=__a , ) lowerCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): if latents is None: lowerCAmelCase = randn_tensor(__a , generator=__a , device=__a , dtype=__a ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) lowerCAmelCase = latents.to(__a ) lowerCAmelCase = latents * scheduler.init_noise_sigma return latents def __snake_case ( self , UpperCAmelCase_=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowerCAmelCase = torch.device(F"""cuda:{gpu_id}""" ) lowerCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) def __snake_case ( self , UpperCAmelCase_=0 ): if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) lowerCAmelCase = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=__a ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowerCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: lowerCAmelCase = cpu_offload_with_hook(__a , __a , prev_module_hook=__a ) # We'll offload the last model manually. lowerCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __snake_case ( self ): if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(__a , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__a ) def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 5_12 , UpperCAmelCase_ = 5_12 , UpperCAmelCase_ = 1_00 , UpperCAmelCase_ = 4.0 , UpperCAmelCase_ = 1 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = "pil" , UpperCAmelCase_ = True , ): lowerCAmelCase = self._execution_device lowerCAmelCase = guidance_scale > 1.0 if isinstance(__a , __a ): lowerCAmelCase = torch.cat(__a , dim=0 ) lowerCAmelCase = image_embeds.shape[0] * num_images_per_prompt if isinstance(__a , __a ): lowerCAmelCase = torch.cat(__a , dim=0 ) if do_classifier_free_guidance: lowerCAmelCase = image_embeds.repeat_interleave(__a , dim=0 ) lowerCAmelCase = negative_image_embeds.repeat_interleave(__a , dim=0 ) lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__a ) self.scheduler.set_timesteps(__a , device=__a ) lowerCAmelCase = self.scheduler.timesteps lowerCAmelCase = self.unet.config.in_channels lowerCAmelCase = downscale_height_and_width(__a , __a , self.movq_scale_factor ) # create initial latent lowerCAmelCase = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __a , __a , __a , self.scheduler , ) for i, t in enumerate(self.progress_bar(__a ) ): # expand the latents if we are doing classifier free guidance lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCAmelCase = {"""image_embeds""": image_embeds} lowerCAmelCase = self.unet( sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0] if do_classifier_free_guidance: lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) lowerCAmelCase = noise_pred.chunk(2 ) lowerCAmelCase = variance_pred.chunk(2 ) lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowerCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowerCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase = self.scheduler.step( __a , __a , __a , generator=__a , )[0] # post-processing lowerCAmelCase = self.movq.decode(__a , force_not_quantize=__a )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: lowerCAmelCase = image * 0.5 + 0.5 lowerCAmelCase = image.clamp(0 , 1 ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCAmelCase = self.numpy_to_pil(__a ) if not return_dict: return (image,) return ImagePipelineOutput(images=__a )
719
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __UpperCamelCase ( yaml.SafeLoader ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys] lowerCAmelCase = Counter(UpperCAmelCase_ ) lowerCAmelCase = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ): lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ ) self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ ) return mapping def UpperCAmelCase ( _snake_case ): lowerCAmelCase = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase = full_content[1:].index('''---''' ) + 1 lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_snake_case ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def __snake_case ( cls , UpperCAmelCase_ ): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCAmelCase_ ) else: return cls() def __snake_case ( self , UpperCAmelCase_ ): if path.exists(): with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file: lowerCAmelCase = readme_file.read() else: lowerCAmelCase = None lowerCAmelCase = self._to_readme(UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ = None ): if readme_content is not None: lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ ) lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def __snake_case ( cls , UpperCAmelCase_ ): lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCAmelCase_ ) def __snake_case ( self ): return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' ) UpperCAmelCase_ ={ """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") UpperCAmelCase_ =ap.parse_args() UpperCAmelCase_ =Path(args.readme_filepath) UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
33
0
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING UpperCAmelCase_ =logging.get_logger(__name__) @add_end_docstrings(__UpperCAmelCase ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) requires_backends(self , '''decord''' ) self.check_model_type(UpperCAmelCase__ ) def __snake_case ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None ): lowerCAmelCase = {} if frame_sampling_rate is not None: lowerCAmelCase = frame_sampling_rate if num_frames is not None: lowerCAmelCase = num_frames lowerCAmelCase = {} if top_k is not None: lowerCAmelCase = top_k return preprocess_params, {}, postprocess_params def __call__( self , UpperCAmelCase_ , **UpperCAmelCase_ ): return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=1 ): if num_frames is None: lowerCAmelCase = self.model.config.num_frames if video.startswith('''http://''' ) or video.startswith('''https://''' ): lowerCAmelCase = BytesIO(requests.get(UpperCAmelCase__ ).content ) lowerCAmelCase = VideoReader(UpperCAmelCase__ ) videoreader.seek(0 ) lowerCAmelCase = 0 lowerCAmelCase = num_frames * frame_sampling_rate - 1 lowerCAmelCase = np.linspace(UpperCAmelCase__ , UpperCAmelCase__ , num=UpperCAmelCase__ , dtype=np.intaa ) lowerCAmelCase = videoreader.get_batch(UpperCAmelCase__ ).asnumpy() lowerCAmelCase = list(UpperCAmelCase__ ) lowerCAmelCase = self.image_processor(UpperCAmelCase__ , return_tensors=self.framework ) return model_inputs def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.model(**UpperCAmelCase__ ) return model_outputs def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=5 ): if top_k > self.model.config.num_labels: lowerCAmelCase = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase = model_outputs.logits.softmax(-1 )[0] lowerCAmelCase , lowerCAmelCase = probs.topk(UpperCAmelCase__ ) else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) lowerCAmelCase = scores.tolist() lowerCAmelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
720
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 ) lowerCAmelCase = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): for example in examples: lowerCAmelCase = video_classifier(UpperCAmelCase_ ) self.assertEqual( UpperCAmelCase_ , [ {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, {'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )}, ] , ) @require_torch def __snake_case ( self ): lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowerCAmelCase = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowerCAmelCase = pipeline( '''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 ) lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) lowerCAmelCase = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __snake_case ( self ): pass
33
0
import math import sys def UpperCAmelCase ( _snake_case ): lowerCAmelCase = '''''' try: with open(_snake_case , '''rb''' ) as binary_file: lowerCAmelCase = binary_file.read() for dat in data: lowerCAmelCase = F"""{dat:08b}""" result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def UpperCAmelCase ( _snake_case ): lowerCAmelCase = {'''0''': '''0''', '''1''': '''1'''} lowerCAmelCase , lowerCAmelCase = '''''', '''''' lowerCAmelCase = len(_snake_case ) for i in range(len(_snake_case ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue lowerCAmelCase = lexicon[curr_string] result += last_match_id lowerCAmelCase = last_match_id + '''0''' if math.loga(_snake_case ).is_integer(): lowerCAmelCase = {} for curr_key in list(_snake_case ): lowerCAmelCase = lexicon.pop(_snake_case ) lowerCAmelCase = new_lex lowerCAmelCase = last_match_id + '''1''' index += 1 lowerCAmelCase = '''''' return result def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = 8 try: with open(_snake_case , '''wb''' ) as opened_file: lowerCAmelCase = [ to_write[i : i + byte_length] for i in range(0 , len(_snake_case ) , _snake_case ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_snake_case , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def UpperCAmelCase ( _snake_case ): lowerCAmelCase = 0 for letter in data_bits: if letter == "1": break counter += 1 lowerCAmelCase = data_bits[counter:] lowerCAmelCase = data_bits[counter + 1 :] return data_bits def UpperCAmelCase ( _snake_case , _snake_case ): lowerCAmelCase = read_file_binary(_snake_case ) lowerCAmelCase = remove_prefix(_snake_case ) lowerCAmelCase = decompress_data(_snake_case ) write_file_binary(_snake_case , _snake_case ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
721
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def __snake_case ( self , UpperCAmelCase_=0 ): lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) ) lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''strength''': 0.75, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) # warmup pass to apply optimizations lowerCAmelCase = pipe(**self.get_dummy_inputs() ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def __snake_case ( self ): lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = self.get_dummy_inputs() lowerCAmelCase = pipe(**UpperCAmelCase_ ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' @property def __snake_case ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __snake_case ( self ): lowerCAmelCase = ort.SessionOptions() lowerCAmelCase = False return options def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def __snake_case ( self ): lowerCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) lowerCAmelCase = init_image.resize((7_68, 5_12) ) lowerCAmelCase = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' ) lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCAmelCase = '''A fantasy landscape, trending on artstation''' lowerCAmelCase = np.random.RandomState(0 ) lowerCAmelCase = pipe( prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , ) lowerCAmelCase = output.images lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
33
0
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ): warnings.warn( '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use FlavaImageProcessor instead.''' , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
700
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCAmelCase ( _snake_case ): lowerCAmelCase = args.pruning_method lowerCAmelCase = args.threshold lowerCAmelCase = args.model_name_or_path.rstrip('''/''' ) lowerCAmelCase = args.target_model_path print(F"""Load fine-pruned model from {model_name_or_path}""" ) lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) ) lowerCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "classifier" in name or "qa_output" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) elif "bias" in name: lowerCAmelCase = tensor print(F"""Copied layer {name}""" ) else: if pruning_method == "magnitude": lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "topK": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) elif pruning_method == "l0": if "mask_scores" in name: continue lowerCAmelCase = name[:-6] lowerCAmelCase = model[F"""{prefix_}mask_scores"""] lowerCAmelCase , lowerCAmelCase = -0.1, 1.1 lowerCAmelCase = torch.sigmoid(_snake_case ) lowerCAmelCase = s * (r - l) + l lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) lowerCAmelCase = tensor * mask print(F"""Pruned layer {name}""" ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: lowerCAmelCase = os.path.join( os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" ) if not os.path.isdir(_snake_case ): shutil.copytree(_snake_case , _snake_case ) print(F"""\nCreated folder {target_model_path}""" ) torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": UpperCAmelCase_ =argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) UpperCAmelCase_ =parser.parse_args() main(args)
33
0
from __future__ import annotations def UpperCAmelCase ( _snake_case ): # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(lowercase__ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(lowercase__ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
701
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } UpperCAmelCase_ ={ """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } UpperCAmelCase_ ={ """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def UpperCAmelCase ( _snake_case ): lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char lowerCAmelCase = set(_snake_case ) return pairs class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Union[str, Any] =VOCAB_FILES_NAMES __a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ): super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = vocab_file lowerCAmelCase = merges_file lowerCAmelCase = {} lowerCAmelCase = 0 lowerCAmelCase = 1 lowerCAmelCase = 2 lowerCAmelCase = 3 self.add_from_file(UpperCAmelCase_ ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1] lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case ( self ): return len(self.encoder ) def __snake_case ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , UpperCAmelCase_ ): if token in self.cache: return self.cache[token] lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) lowerCAmelCase = get_pairs(UpperCAmelCase_ ) if not pairs: return token while True: lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase , lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 while i < len(UpperCAmelCase_ ): try: lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(UpperCAmelCase_ ) lowerCAmelCase = new_word if len(UpperCAmelCase_ ) == 1: break else: lowerCAmelCase = get_pairs(UpperCAmelCase_ ) lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ ) lowerCAmelCase = word[:-4] lowerCAmelCase = word return word def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = [] lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) ) return split_tokens def __snake_case ( self , UpperCAmelCase_ ): return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , UpperCAmelCase_ ): return self.decoder.get(UpperCAmelCase_ , self.unk_token ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ): if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join( UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.vocab_file , UpperCAmelCase_ ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ): copyfile(self.merges_file , UpperCAmelCase_ ) return out_vocab_file, out_merge_file def __snake_case ( self , UpperCAmelCase_ ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): try: with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(UpperCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" ) return lowerCAmelCase = f.readlines() for lineTmp in lines: lowerCAmelCase = lineTmp.strip() lowerCAmelCase = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) lowerCAmelCase = line[:idx] lowerCAmelCase = len(self.encoder )
33
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ 'configuration_nllb_moe': [ 'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NllbMoeConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ 'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST', 'NllbMoeForConditionalGeneration', 'NllbMoeModel', 'NllbMoePreTrainedModel', 'NllbMoeTop2Router', 'NllbMoeSparseMLP', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
702
from __future__ import annotations from typing import Generic, TypeVar UpperCAmelCase_ =TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = data lowerCAmelCase = self lowerCAmelCase = 0 class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # map from node name to the node object lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # create a new set with x as its member lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): # find the set x belongs to (with path-compression) lowerCAmelCase = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase = self.find_set(elem_ref.parent.data ) return elem_ref.parent def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase = nodea else: lowerCAmelCase = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): # merge 2 disjoint sets self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) ) class __UpperCamelCase ( Generic[T] ): '''simple docstring''' def __init__( self ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase = {} def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): # add an edge with the given weight self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase = weight lowerCAmelCase = weight def __snake_case ( self ): lowerCAmelCase = [] lowerCAmelCase = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda UpperCAmelCase_ : x[2] ) # creating the disjoint set lowerCAmelCase = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(UpperCAmelCase_ ) # MST generation lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index] index += 1 lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ ) return graph
33
0
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __UpperCamelCase : '''simple docstring''' __a : int =BlenderbotSmallConfig __a : int ={} __a : List[str] ="""gelu""" def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=99 , UpperCAmelCase_=32 , UpperCAmelCase_=2 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=2 , UpperCAmelCase_=1 , UpperCAmelCase_=0 , ): lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = eos_token_id lowerCAmelCase = pad_token_id lowerCAmelCase = bos_token_id def __snake_case ( self ): lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCAmelCase = prepare_blenderbot_small_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) return config, inputs_dict def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = TFBlenderbotSmallModel(config=lowercase__ ).get_decoder() lowerCAmelCase = inputs_dict["""input_ids"""] lowerCAmelCase = input_ids[:1, :] lowerCAmelCase = inputs_dict["""attention_mask"""][:1, :] lowerCAmelCase = inputs_dict["""head_mask"""] lowerCAmelCase = 1 # first forward pass lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , use_cache=lowercase__ ) lowerCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ )[0] lowerCAmelCase = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case=None , ): if attention_mask is None: lowerCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Dict =( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __a : Any =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __a : List[Any] =( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __a : int =True __a : Optional[int] =False __a : Dict =False def __snake_case ( self ): lowerCAmelCase = TFBlenderbotSmallModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=lowercase__ ) def __snake_case ( self ): self.config_tester.run_common_tests() def __snake_case ( self ): lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) @require_tokenizers @require_tf class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' __a : Optional[Any] =[ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] __a : Optional[Any] ="""facebook/blenderbot_small-90M""" @cached_property def __snake_case ( self ): # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def __snake_case ( self ): lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __snake_case ( self ): lowerCAmelCase = self.tokenizer(self.src_text , return_tensors='''tf''' ) lowerCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase__ , ) lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase__ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
703
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations(_snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(_snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): def count_of_possible_combinations_with_dp_array( _snake_case , _snake_case ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowerCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item , _snake_case ) for item in array ) lowerCAmelCase = answer return answer lowerCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(_snake_case , _snake_case ) def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ): lowerCAmelCase = [0] * (target + 1) lowerCAmelCase = 1 for i in range(1 , target + 1 ): for j in range(_snake_case ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ =3 UpperCAmelCase_ =5 UpperCAmelCase_ =[1, 2, 5] print(combination_sum_iv(n, array, target))
33
0
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=True , UpperCAmelCase_=None , **UpperCAmelCase_ ): lowerCAmelCase = parent lowerCAmelCase = config_class lowerCAmelCase = has_text_modality lowerCAmelCase = kwargs lowerCAmelCase = common_properties def __snake_case ( self ): lowerCAmelCase = self.config_class(**self.inputs_dict ) lowerCAmelCase = ( ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers'''] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['''vocab_size'''] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__snake_case , __snake_case ) , msg=F"""`{prop}` does not exist""" ) # Test that config has the common properties as setter for idx, name in enumerate(__snake_case ): try: setattr(__snake_case , __snake_case , __snake_case ) self.parent.assertEqual( getattr(__snake_case , __snake_case ) , __snake_case , msg=F"""`{name} value {idx} expected, but was {getattr(__snake_case , __snake_case )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(__snake_case ): try: lowerCAmelCase = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__snake_case , __snake_case ) , __snake_case , msg=F"""`{name} value {idx} expected, but was {getattr(__snake_case , __snake_case )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def __snake_case ( self ): lowerCAmelCase = self.config_class(**self.inputs_dict ) lowerCAmelCase = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __snake_case ) def __snake_case ( self ): lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase = os.path.join(__snake_case , '''config.json''' ) config_first.to_json_file(__snake_case ) lowerCAmelCase = self.config_class.from_json_file(__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __snake_case ( self ): lowerCAmelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__snake_case ) lowerCAmelCase = self.config_class.from_pretrained(__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __snake_case ( self ): lowerCAmelCase = self.config_class(**self.inputs_dict ) lowerCAmelCase = '''test''' with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase = os.path.join(__snake_case , __snake_case ) config_first.save_pretrained(__snake_case ) lowerCAmelCase = self.config_class.from_pretrained(__snake_case , subfolder=__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __snake_case ( self ): lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) lowerCAmelCase = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def __snake_case ( self ): if self.config_class.is_composition: return lowerCAmelCase = self.config_class() self.parent.assertIsNotNone(__snake_case ) def __snake_case ( self ): lowerCAmelCase = copy.deepcopy(__snake_case ) lowerCAmelCase = self.config_class(**__snake_case ) lowerCAmelCase = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) ) elif getattr(__snake_case , __snake_case ) != value: wrong_values.append((key, getattr(__snake_case , __snake_case ), value) ) if len(__snake_case ) > 0: lowerCAmelCase = '''\n'''.join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] ) raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" ) def __snake_case ( self ): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
704
import torch from diffusers import StableDiffusionPipeline UpperCAmelCase_ ="""path-to-your-trained-model""" UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""") UpperCAmelCase_ ="""A photo of sks dog in a bucket""" UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("""dog-bucket.png""")
33
0
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) UpperCAmelCase_ =[ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None ): lowerCAmelCase = None lowerCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) lowerCAmelCase = os.path.abspath('''examples''' ) for item in os.listdir(UpperCAmelCase_ ): if item not in EXCLUDE_EXAMPLES: lowerCAmelCase = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if os.path.isfile(UpperCAmelCase_ ) and ".py" in item_path: with self.subTest( tested_script=UpperCAmelCase_ , feature_script=UpperCAmelCase_ , tested_section='''main()''' if parser_only else '''training_function()''' , ): lowerCAmelCase = compare_against_test( os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = '''\n'''.join(UpperCAmelCase_ ) if special_strings is not None: for string in special_strings: lowerCAmelCase = diff.replace(UpperCAmelCase_ , '''''' ) self.assertEqual(UpperCAmelCase_ , '''''' ) def __snake_case ( self ): self.one_complete_example('''complete_nlp_example.py''' , UpperCAmelCase_ ) self.one_complete_example('''complete_nlp_example.py''' , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) lowerCAmelCase = [ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) self.one_complete_example('''complete_cv_example.py''' , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : List[Any] =False @classmethod def __snake_case ( cls ): super().setUpClass() lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowerCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def __snake_case ( cls ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __snake_case ( self ): lowerCAmelCase = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def __snake_case ( self ): lowerCAmelCase = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() lowerCAmelCase = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def __snake_case ( self ): lowerCAmelCase = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} """.split() lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ ) self.assertNotIn('''epoch 0:''' , UpperCAmelCase_ ) self.assertIn('''epoch 1:''' , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} """.split() lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ ) if torch.cuda.is_available(): lowerCAmelCase = torch.cuda.device_count() else: lowerCAmelCase = 1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , UpperCAmelCase_ ) self.assertIn('''epoch 1:''' , UpperCAmelCase_ ) else: self.assertIn('''epoch 0:''' , UpperCAmelCase_ ) self.assertIn('''epoch 1:''' , UpperCAmelCase_ ) @slow def __snake_case ( self ): lowerCAmelCase = ''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ ) lowerCAmelCase = re.findall('''({.+})''' , UpperCAmelCase_ ) lowerCAmelCase = [r for r in results if '''accuracy''' in r][-1] lowerCAmelCase = ast.literal_eval(UpperCAmelCase_ ) self.assertGreaterEqual(results['''accuracy'''] , 0.75 ) def __snake_case ( self ): lowerCAmelCase = ['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmpdir: lowerCAmelCase = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , '''tracking''' ) ) ) def __snake_case ( self ): lowerCAmelCase = ['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def __snake_case ( self ): lowerCAmelCase = ['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
705
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase_ ={ """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
0
def UpperCAmelCase ( _snake_case ): if not all(char in '''01''' for char in bin_string ): raise ValueError('''Non-binary value was passed to the function''' ) if not bin_string: raise ValueError('''Empty string was passed to the function''' ) lowerCAmelCase = "" while len(lowercase_ ) % 3 != 0: lowerCAmelCase = "0" + bin_string lowerCAmelCase = [ bin_string[index : index + 3] for index in range(len(lowercase_ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: lowerCAmelCase = 0 for index, val in enumerate(lowercase_ ): oct_val += int(2 ** (2 - index) * int(lowercase_ ) ) oct_string += str(lowercase_ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
706
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) ) lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowerCAmelCase = {'''unk_token''': '''<unk>'''} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase_ ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def __snake_case ( self ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = self.get_dummy_dataset() lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' ) lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , ) return retriever def __snake_case ( self ): lowerCAmelCase = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) ) lowerCAmelCase = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) lowerCAmelCase = RagRetriever( UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: lowerCAmelCase = self.get_dummy_dataset() retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) def __snake_case ( self ): lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_legacy_index_retriever() lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(UpperCAmelCase_ ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def __snake_case ( self ): lowerCAmelCase = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(UpperCAmelCase_ ) lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): import torch lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever() lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , np.ndarray ) lowerCAmelCase = retriever( UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) self.assertIsInstance(UpperCAmelCase_ , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def __snake_case ( self ): lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer() lowerCAmelCase = 1 lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ ) retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ ) lowerCAmelCase = [[5, 7], [10, 11]] lowerCAmelCase = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ ) self.assertEqual( len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
33
0
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class __UpperCamelCase ( UpperCAmelCase_ ): '''simple docstring''' __a : List[str] ="align_text_model" def __init__( self , UpperCAmelCase_=3_05_22 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=5_12 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=0 , UpperCAmelCase_="absolute" , UpperCAmelCase_=True , **UpperCAmelCase_ , ): super().__init__(**_snake_case ) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = hidden_act lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = position_embedding_type lowerCAmelCase = use_cache lowerCAmelCase = pad_token_id @classmethod def __snake_case ( cls , UpperCAmelCase_ , **UpperCAmelCase_ ): cls._set_token_in_kwargs(_snake_case ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(_snake_case , **_snake_case ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": lowerCAmelCase = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_snake_case , **_snake_case ) class __UpperCamelCase ( UpperCAmelCase_ ): '''simple docstring''' __a : str ="align_vision_model" def __init__( self , UpperCAmelCase_ = 3 , UpperCAmelCase_ = 6_00 , UpperCAmelCase_ = 2.0 , UpperCAmelCase_ = 3.1 , UpperCAmelCase_ = 8 , UpperCAmelCase_ = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase_ = [32, 16, 24, 40, 80, 1_12, 1_92] , UpperCAmelCase_ = [16, 24, 40, 80, 1_12, 1_92, 3_20] , UpperCAmelCase_ = [] , UpperCAmelCase_ = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase_ = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase_ = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase_ = 0.25 , UpperCAmelCase_ = "swish" , UpperCAmelCase_ = 25_60 , UpperCAmelCase_ = "mean" , UpperCAmelCase_ = 0.02 , UpperCAmelCase_ = 0.001 , UpperCAmelCase_ = 0.99 , UpperCAmelCase_ = 0.2 , **UpperCAmelCase_ , ): super().__init__(**_snake_case ) lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = width_coefficient lowerCAmelCase = depth_coefficient lowerCAmelCase = depth_divisor lowerCAmelCase = kernel_sizes lowerCAmelCase = in_channels lowerCAmelCase = out_channels lowerCAmelCase = depthwise_padding lowerCAmelCase = strides lowerCAmelCase = num_block_repeats lowerCAmelCase = expand_ratios lowerCAmelCase = squeeze_expansion_ratio lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dim lowerCAmelCase = pooling_type lowerCAmelCase = initializer_range lowerCAmelCase = batch_norm_eps lowerCAmelCase = batch_norm_momentum lowerCAmelCase = drop_connect_rate lowerCAmelCase = sum(_snake_case ) * 4 @classmethod def __snake_case ( cls , UpperCAmelCase_ , **UpperCAmelCase_ ): cls._set_token_in_kwargs(_snake_case ) lowerCAmelCase , lowerCAmelCase = cls.get_config_dict(_snake_case , **_snake_case ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": lowerCAmelCase = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_snake_case , **_snake_case ) class __UpperCamelCase ( UpperCAmelCase_ ): '''simple docstring''' __a : Optional[int] ="align" __a : str =True def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=6_40 , UpperCAmelCase_=1.0 , UpperCAmelCase_=0.02 , **UpperCAmelCase_ , ): super().__init__(**_snake_case ) if text_config is None: lowerCAmelCase = {} logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' ) if vision_config is None: lowerCAmelCase = {} logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' ) lowerCAmelCase = AlignTextConfig(**_snake_case ) lowerCAmelCase = AlignVisionConfig(**_snake_case ) lowerCAmelCase = projection_dim lowerCAmelCase = temperature_init_value lowerCAmelCase = initializer_range @classmethod def __snake_case ( cls , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_snake_case ) def __snake_case ( self ): lowerCAmelCase = copy.deepcopy(self.__dict__ ) lowerCAmelCase = self.text_config.to_dict() lowerCAmelCase = self.vision_config.to_dict() lowerCAmelCase = self.__class__.model_type return output
707
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ =logging.get_logger(__name__) UpperCAmelCase_ ={ """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""", } class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' __a : Any ="""switch_transformers""" __a : Union[str, Any] =["""past_key_values"""] __a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ): lowerCAmelCase = vocab_size lowerCAmelCase = d_model lowerCAmelCase = d_kv lowerCAmelCase = d_ff lowerCAmelCase = num_sparse_encoder_layers lowerCAmelCase = num_layers lowerCAmelCase = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers else: lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers else: lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers lowerCAmelCase = num_heads lowerCAmelCase = num_experts lowerCAmelCase = expert_capacity lowerCAmelCase = router_bias lowerCAmelCase = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) lowerCAmelCase = router_dtype lowerCAmelCase = router_ignore_padding_tokens lowerCAmelCase = relative_attention_num_buckets lowerCAmelCase = relative_attention_max_distance lowerCAmelCase = dropout_rate lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_factor lowerCAmelCase = feed_forward_proj lowerCAmelCase = use_cache lowerCAmelCase = add_router_probs lowerCAmelCase = router_z_loss_coef lowerCAmelCase = router_aux_loss_coef lowerCAmelCase = self.feed_forward_proj.split('''-''' ) lowerCAmelCase = act_info[-1] lowerCAmelCase = act_info[0] == '''gated''' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": lowerCAmelCase = '''gelu_new''' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
33
0
import numpy # List of input, output pairs UpperCAmelCase_ =( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCAmelCase_ =(((515, 22, 13), 555), ((61, 35, 49), 150)) UpperCAmelCase_ =[2, 4, 1, 5] UpperCAmelCase_ =len(train_data) UpperCAmelCase_ =0.0_09 def UpperCAmelCase ( _snake_case , _snake_case="train" ): return calculate_hypothesis_value(_snake_case , _snake_case ) - output( _snake_case , _snake_case ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = 0 for i in range(len(_snake_case ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def UpperCAmelCase ( _snake_case , _snake_case ): if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def UpperCAmelCase ( _snake_case , _snake_case ): if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def UpperCAmelCase ( _snake_case , _snake_case=m ): lowerCAmelCase = 0 for i in range(_snake_case ): if index == -1: summation_value += _error(_snake_case ) else: summation_value += _error(_snake_case ) * train_data[i][0][index] return summation_value def UpperCAmelCase ( _snake_case ): lowerCAmelCase = summation_of_cost_derivative(_snake_case , _snake_case ) / m return cost_derivative_value def UpperCAmelCase ( ): global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCAmelCase = 0.000002 lowerCAmelCase = 0 lowerCAmelCase = 0 while True: j += 1 lowerCAmelCase = [0, 0, 0, 0] for i in range(0 , len(_snake_case ) ): lowerCAmelCase = get_cost_derivative(i - 1 ) lowerCAmelCase = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( _snake_case , _snake_case , atol=_snake_case , rtol=_snake_case , ): break lowerCAmelCase = temp_parameter_vector print(('''Number of iterations:''', j) ) def UpperCAmelCase ( ): for i in range(len(_snake_case ) ): print(('''Actual output value:''', output(_snake_case , '''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(_snake_case , '''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
708
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def UpperCAmelCase ( _snake_case ): lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case ) lowerCAmelCase = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase = sum(single_char_strings.values() ) # one length string lowerCAmelCase = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase = single_char_strings[ch] lowerCAmelCase = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(F"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase = sum(two_char_strings.values() ) lowerCAmelCase = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase = cha + cha if sequence in two_char_strings: lowerCAmelCase = two_char_strings[sequence] lowerCAmelCase = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(F"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def UpperCAmelCase ( _snake_case ): lowerCAmelCase = Counter() # type: ignore lowerCAmelCase = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def UpperCAmelCase ( ): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
33
0
from __future__ import annotations class __UpperCamelCase : '''simple docstring''' def __init__( self , UpperCAmelCase_ ): lowerCAmelCase = order # a_{0} ... a_{k} lowerCAmelCase = [1.0] + [0.0] * order # b_{0} ... b_{k} lowerCAmelCase = [1.0] + [0.0] * order # x[n-1] ... x[n-k] lowerCAmelCase = [0.0] * self.order # y[n-1] ... y[n-k] lowerCAmelCase = [0.0] * self.order def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ): if len(_a ) < self.order: lowerCAmelCase = [1.0, *a_coeffs] if len(_a ) != self.order + 1: lowerCAmelCase = ( F"""Expected a_coeffs to have {self.order + 1} elements """ F"""for {self.order}-order filter, got {len(_a )}""" ) raise ValueError(_a ) if len(_a ) != self.order + 1: lowerCAmelCase = ( F"""Expected b_coeffs to have {self.order + 1} elements """ F"""for {self.order}-order filter, got {len(_a )}""" ) raise ValueError(_a ) lowerCAmelCase = a_coeffs lowerCAmelCase = b_coeffs def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) lowerCAmelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] lowerCAmelCase = self.input_history[:-1] lowerCAmelCase = self.output_history[:-1] lowerCAmelCase = sample lowerCAmelCase = result return result
709
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Tuple =IFInpaintingSuperResolutionPipeline __a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} __a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) __a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""} def __snake_case ( self ): return self._get_superresolution_dummy_components() def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ): if str(UpperCAmelCase_ ).startswith('''mps''' ): lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ ) else: lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ ) lowerCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __snake_case ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __snake_case ( self ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __snake_case ( self ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __snake_case ( self ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __snake_case ( self ): self._test_save_load_local() def __snake_case ( self ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
33
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = BlipImageProcessor() lowerCAmelCase = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' ) lowerCAmelCase = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) lowerCAmelCase = InstructBlipProcessor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def __snake_case ( self , **UpperCAmelCase_ ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).tokenizer def __snake_case ( self , **UpperCAmelCase_ ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).image_processor def __snake_case ( self , **UpperCAmelCase_ ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ).qformer_tokenizer def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) def __snake_case ( self ): lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] lowerCAmelCase = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __snake_case ( self ): lowerCAmelCase = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 ) lowerCAmelCase = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ ) def __snake_case ( self ): lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_qformer_tokenizer() lowerCAmelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = image_processor(UpperCamelCase__ , return_tensors='''np''' ) lowerCAmelCase = processor(images=UpperCamelCase__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __snake_case ( self ): lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_qformer_tokenizer() lowerCAmelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) lowerCAmelCase = '''lower newer''' lowerCAmelCase = processor(text=UpperCamelCase__ ) lowerCAmelCase = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) lowerCAmelCase = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] ) def __snake_case ( self ): lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_qformer_tokenizer() lowerCAmelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) lowerCAmelCase = '''lower newer''' lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def __snake_case ( self ): lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_qformer_tokenizer() lowerCAmelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase = processor.batch_decode(UpperCamelCase__ ) lowerCAmelCase = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def __snake_case ( self ): lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_qformer_tokenizer() lowerCAmelCase = InstructBlipProcessor( tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ ) lowerCAmelCase = '''lower newer''' lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual( list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
710
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ ={ """configuration_efficientformer""": [ """EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientFormerConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =["""EfficientFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientFormerForImageClassification""", """EfficientFormerForImageClassificationWithTeacher""", """EfficientFormerModel""", """EfficientFormerPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFEfficientFormerForImageClassification""", """TFEfficientFormerForImageClassificationWithTeacher""", """TFEfficientFormerModel""", """TFEfficientFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
33
0
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging UpperCAmelCase_ =logging.get_logger(__name__) def UpperCAmelCase ( _snake_case , _snake_case ): try: with open(lowerCamelCase_ , '''rb''' ) as flax_state_f: lowerCAmelCase = from_bytes(lowerCamelCase_ , flax_state_f.read() ) except UnpicklingError as e: try: with open(lowerCamelCase_ ) as f: if f.read().startswith('''version''' ): raise OSError( '''You seem to have cloned a repository without having git-lfs installed. Please''' ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the''' ''' folder you cloned.''' ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( _snake_case , _snake_case ): try: import torch # noqa: F401 except ImportError: logger.error( '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda _snake_case : x.dtype == jnp.bfloataa , lowerCamelCase_ ) ).values() if any(lowerCamelCase_ ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCAmelCase = jax.tree_util.tree_map( lambda _snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCamelCase_ ) lowerCAmelCase = '''''' lowerCAmelCase = flatten_dict(lowerCamelCase_ , sep='''.''' ) lowerCAmelCase = pt_model.state_dict() # keep track of unexpected & missing keys lowerCAmelCase = [] lowerCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase = flax_key_tuple.split('''.''' ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCAmelCase = flax_key_tuple_array[:-1] + ['''weight'''] lowerCAmelCase = jnp.transpose(lowerCamelCase_ , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCAmelCase = flax_key_tuple_array[:-1] + ['''weight'''] lowerCAmelCase = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCAmelCase = flax_key_tuple_array[:-1] + ['''weight'''] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(lowerCamelCase_ ): lowerCAmelCase = ( flax_key_tuple_string.replace('''_0''' , '''.0''' ) .replace('''_1''' , '''.1''' ) .replace('''_2''' , '''.2''' ) .replace('''_3''' , '''.3''' ) .replace('''_4''' , '''.4''' ) .replace('''_5''' , '''.5''' ) .replace('''_6''' , '''.6''' ) .replace('''_7''' , '''.7''' ) .replace('''_8''' , '''.8''' ) .replace('''_9''' , '''.9''' ) ) lowerCAmelCase = '''.'''.join(lowerCamelCase_ ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """ F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict lowerCAmelCase = np.asarray(lowerCamelCase_ ) if not isinstance(lowerCamelCase_ , np.ndarray ) else flax_tensor lowerCAmelCase = torch.from_numpy(lowerCamelCase_ ) # remove from missing keys missing_keys.remove(lowerCamelCase_ ) else: # weight is not expected by PyTorch model unexpected_keys.append(lowerCamelCase_ ) pt_model.load_state_dict(lowerCamelCase_ ) # re-transform missing_keys to list lowerCAmelCase = list(lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) if len(lowerCamelCase_ ) > 0: logger.warning( F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" ''' use it for predictions and inference.''' ) return pt_model
711
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): '''simple docstring''' __a : Optional[datasets.Features] =None __a : str ="utf-8" __a : Optional[str] =None __a : Optional[str] =None __a : bool =True # deprecated __a : Optional[int] =None # deprecated __a : int =1_0 << 2_0 # 10MB __a : Optional[bool] =None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): '''simple docstring''' __a : str =JsonConfig def __snake_case ( self ): if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) lowerCAmelCase = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def __snake_case ( self , UpperCAmelCase_ ): if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase_ , (str, list, tuple) ): lowerCAmelCase = data_files if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] lowerCAmelCase = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = [files] lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) ) return splits def __snake_case ( self , UpperCAmelCase_ ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema ) return pa_table def __snake_case ( self , UpperCAmelCase_ ): for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) # We keep only the field we are interested in lowerCAmelCase = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase_ , (list, tuple) ): lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} else: lowerCAmelCase = dataset lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) yield file_idx, self._cast_table(UpperCAmelCase_ ) # If the file has one json object per line else: with open(UpperCAmelCase_ , '''rb''' ) as f: lowerCAmelCase = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 ) lowerCAmelCase = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: lowerCAmelCase = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase_ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' ) try: while True: try: lowerCAmelCase = paj.read_json( io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase_ , pa.ArrowInvalid ) and "straddling" not in str(UpperCAmelCase_ ) or block_size > len(UpperCAmelCase_ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: lowerCAmelCase = json.load(UpperCAmelCase_ ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON try: lowerCAmelCase = set().union(*[row.keys() for row in dataset] ) lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys} lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(UpperCAmelCase_ ) break else: logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """ F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ ) batch_idx += 1
33
0
def UpperCAmelCase ( _snake_case = 1000 ): lowerCAmelCase = 3 lowerCAmelCase = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(F'''{solution() = }''')
712
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ =logging.get_logger(__name__) class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __a : Optional[Any] ="""maskformer-swin""" __a : Optional[int] ={ """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ): super().__init__(**UpperCAmelCase_ ) lowerCAmelCase = image_size lowerCAmelCase = patch_size lowerCAmelCase = num_channels lowerCAmelCase = embed_dim lowerCAmelCase = depths lowerCAmelCase = len(UpperCAmelCase_ ) lowerCAmelCase = num_heads lowerCAmelCase = window_size lowerCAmelCase = mlp_ratio lowerCAmelCase = qkv_bias lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = drop_path_rate lowerCAmelCase = hidden_act lowerCAmelCase = use_absolute_embeddings lowerCAmelCase = layer_norm_eps lowerCAmelCase = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) ) lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )] lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices( out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
33
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available UpperCAmelCase_ ={ """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ =[ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
713
from collections.abc import Sequence def UpperCAmelCase ( _snake_case , _snake_case = False ): if not arr: return 0 lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' ) lowerCAmelCase = 0.0 for num in arr: lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num ) lowerCAmelCase = max(_snake_case , _snake_case ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'''{max_subarray_sum(nums) = }''')
33
0
import inspect import unittest class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): try: import diffusers # noqa: F401 except ImportError: assert False def __snake_case ( self ): import diffusers from diffusers.dependency_versions_table import deps lowerCAmelCase = inspect.getmembers(__A , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": lowerCAmelCase = '''k-diffusion''' elif backend == "invisible_watermark": lowerCAmelCase = '''invisible-watermark''' assert backend in deps, F"""{backend} is not in the deps table!"""
714
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Any =BertJapaneseTokenizer __a : Optional[int] =False __a : int =True def __snake_case ( self ): super().setUp() lowerCAmelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def __snake_case ( self ): try: lowerCAmelCase = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def __snake_case ( self ): lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase_ ) lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。''' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase_ , '''wb''' ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , '''rb''' ) as handle: lowerCAmelCase = pickle.load(UpperCAmelCase_ ) lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def __snake_case ( self ): lowerCAmelCase = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowerCAmelCase = tokenizer.subword_tokenizer lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __a : Union[str, Any] =BertJapaneseTokenizer __a : Optional[int] =False def __snake_case ( self ): super().setUp() lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def __snake_case ( self , **UpperCAmelCase_ ): return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ ) def __snake_case ( self , UpperCAmelCase_ ): lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。''' lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): pass # TODO add if relevant def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def __snake_case ( self ): lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowerCAmelCase = {} for i, token in enumerate(UpperCAmelCase_ ): lowerCAmelCase = i lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def __snake_case ( self ): lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowerCAmelCase = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
33
0
import tensorflow as tf from ...tf_utils import shape_list class __UpperCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=1 , UpperCAmelCase_=False , **UpperCAmelCase_ ): super().__init__(**__SCREAMING_SNAKE_CASE ) lowerCAmelCase = vocab_size lowerCAmelCase = d_embed lowerCAmelCase = d_proj lowerCAmelCase = cutoffs + [vocab_size] lowerCAmelCase = [0] + self.cutoffs lowerCAmelCase = div_val lowerCAmelCase = self.cutoffs[0] lowerCAmelCase = len(self.cutoffs ) - 1 lowerCAmelCase = self.shortlist_size + self.n_clusters lowerCAmelCase = keep_order lowerCAmelCase = [] lowerCAmelCase = [] def __snake_case ( self , UpperCAmelCase_ ): if self.n_clusters > 0: lowerCAmelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name='''cluster_weight''' ) lowerCAmelCase = self.add_weight( shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name='''cluster_bias''' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: lowerCAmelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F"""out_projs_._{i}""" , ) self.out_projs.append(__SCREAMING_SNAKE_CASE ) else: self.out_projs.append(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F"""out_layers_._{i}_._weight""" , ) lowerCAmelCase = self.add_weight( shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): lowerCAmelCase , lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCAmelCase = self.d_embed // (self.div_val**i) lowerCAmelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F"""out_projs_._{i}""" ) self.out_projs.append(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F"""out_layers_._{i}_._weight""" , ) lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__SCREAMING_SNAKE_CASE , name=F"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) super().build(__SCREAMING_SNAKE_CASE ) @staticmethod def __snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ): lowerCAmelCase = x if proj is not None: lowerCAmelCase = tf.einsum('''ibd,ed->ibe''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return tf.einsum('''ibd,nd->ibn''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) + b @staticmethod def __snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ): lowerCAmelCase = shape_list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype ) lowerCAmelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=True , UpperCAmelCase_=False ): lowerCAmelCase = 0 if self.n_clusters == 0: lowerCAmelCase = self._logit(__SCREAMING_SNAKE_CASE , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__SCREAMING_SNAKE_CASE , logits=__SCREAMING_SNAKE_CASE ) lowerCAmelCase = tf.nn.log_softmax(__SCREAMING_SNAKE_CASE , axis=-1 ) else: lowerCAmelCase = shape_list(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = [] lowerCAmelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): lowerCAmelCase , lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: lowerCAmelCase = (target >= l_idx) & (target < r_idx) lowerCAmelCase = tf.where(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - l_idx if self.div_val == 1: lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx] lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx] else: lowerCAmelCase = self.out_layers[i][0] lowerCAmelCase = self.out_layers[i][1] if i == 0: lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 ) lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) lowerCAmelCase = self._logit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.out_projs[0] ) lowerCAmelCase = tf.nn.log_softmax(__SCREAMING_SNAKE_CASE ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: lowerCAmelCase = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self._gather_logprob(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = self._logit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.out_projs[i] ) lowerCAmelCase = tf.nn.log_softmax(__SCREAMING_SNAKE_CASE ) lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__SCREAMING_SNAKE_CASE ) if target is not None: lowerCAmelCase = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = tf.boolean_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowerCAmelCase = self._gather_logprob(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__SCREAMING_SNAKE_CASE , -cur_logprob , shape_list(__SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = tf.concat(__SCREAMING_SNAKE_CASE , axis=-1 ) if target is not None: if return_mean: lowerCAmelCase = tf.reduce_mean(__SCREAMING_SNAKE_CASE ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__SCREAMING_SNAKE_CASE ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__SCREAMING_SNAKE_CASE , name=self.name , aggregation='''mean''' if return_mean else '''''' ) return out
715
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert""" UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class __UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) def __snake_case ( self ): with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ): lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' ) with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f: lowerCAmelCase = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCAmelCase = mock.Mock() lowerCAmelCase = 5_00 lowerCAmelCase = {} lowerCAmelCase = HTTPError lowerCAmelCase = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head: lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def __snake_case ( self ): self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) ) def __snake_case ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' ) lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_68 ) def __snake_case ( self ): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
33
0
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class __UpperCamelCase ( UpperCamelCase_ ): '''simple docstring''' def __snake_case ( self ): lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = 8 # DPR tok lowerCAmelCase = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(__a , exist_ok=__a ) lowerCAmelCase = os.path.join(__a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok lowerCAmelCase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] lowerCAmelCase = dict(zip(__a , range(len(__a ) ) ) ) lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] lowerCAmelCase = {'unk_token': '<unk>'} lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(__a , exist_ok=__a ) lowerCAmelCase = os.path.join(__a , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase = os.path.join(__a , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__a ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__a ) ) def __snake_case ( self ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __snake_case ( self ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __snake_case ( self ): shutil.rmtree(self.tmpdirname ) @require_tokenizers def __snake_case ( self ): lowerCAmelCase = os.path.join(self.tmpdirname , '''rag_tokenizer''' ) lowerCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) lowerCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(__a ) rag_tokenizer.save_pretrained(__a ) lowerCAmelCase = RagTokenizer.from_pretrained(__a , config=__a ) self.assertIsInstance(new_rag_tokenizer.question_encoder , __a ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , __a ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def __snake_case ( self ): lowerCAmelCase = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' ) lowerCAmelCase = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] lowerCAmelCase = tokenizer(__a ) self.assertIsNotNone(__a ) @slow def __snake_case ( self ): lowerCAmelCase = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' ) lowerCAmelCase = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] lowerCAmelCase = tokenizer(__a ) self.assertIsNotNone(__a )
716
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __UpperCamelCase ( __UpperCAmelCase ): '''simple docstring''' def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ): super().__init__( split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , ) lowerCAmelCase = load_from_cache_file lowerCAmelCase = file_format lowerCAmelCase = Spark( df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , ) def __snake_case ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
33
0