code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): a : List[str] = get_activation("swish") self.assertIsInstance(__UpperCAmelCase , nn.SiLU) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20) def __snake_case ( self : str): a : List[Any] = get_activation("silu") self.assertIsInstance(__UpperCAmelCase , nn.SiLU) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20) def __snake_case ( self : Optional[Any]): a : Any = get_activation("mish") self.assertIsInstance(__UpperCAmelCase , nn.Mish) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20) def __snake_case ( self : Union[str, Any]): a : Optional[Any] = get_activation("gelu") self.assertIsInstance(__UpperCAmelCase , nn.GELU) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa)).item() , 0) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa)).item() , 0) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa)).item() , 20)
40
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
1
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' a : List[str] = int(A_ ) if decimal in (0, 1): # Exit cases for the recursion return str(A_ ) a , a : int = divmod(A_ , 2 ) return binary_recursive(A_ ) + str(A_ ) def lowercase ( A_ )-> str: '''simple docstring''' a : Optional[int] = str(A_ ).strip() if not number: raise ValueError("No input value was provided" ) a : Any = "-" if number.startswith("-" ) else "" a : Optional[int] = number.lstrip("-" ) if not number.isnumeric(): raise ValueError("Input value is not an integer" ) return F'''{negative}0b{binary_recursive(int(A_ ) )}''' if __name__ == "__main__": from doctest import testmod testmod()
40
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
1
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name class _A ( _a ): """simple docstring""" def __init__( self : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str): super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase) @torch.no_grad() def __call__( self : Dict , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 100 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[float] = None , __UpperCAmelCase : bool = True , ): if audio_length_in_s is None: a : List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate a : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate a : Optional[Any] = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to''' f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''') a : int = int(__UpperCAmelCase) if sample_size % down_scale_factor != 0: a : Any = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled''' f''' by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising''' " process.") a : List[Any] = int(__UpperCAmelCase) a : Dict = next(iter(self.unet.parameters())).dtype a : str = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase) and len(__UpperCAmelCase) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(__UpperCAmelCase)}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''') a : List[Any] = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase) # set step values self.scheduler.set_timesteps(__UpperCAmelCase , device=audio.device) a : List[str] = self.scheduler.timesteps.to(__UpperCAmelCase) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output a : Dict = self.unet(__UpperCAmelCase , __UpperCAmelCase).sample # 2. compute previous image: x_t -> t_t-1 a : Optional[Any] = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample a : Tuple = audio.clamp(-1 , 1).float().cpu().numpy() a : str = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=__UpperCAmelCase)
40
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class _A : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Any=12 , __UpperCAmelCase : List[str]=7 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Dict=99 , __UpperCAmelCase : Union[str, Any]=32 , __UpperCAmelCase : Union[str, Any]=32 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Dict=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Tuple=512 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : Dict=None , ): a : int = parent a : List[Any] = batch_size a : Any = seq_length a : List[str] = is_training a : int = use_input_mask a : List[str] = use_labels a : Optional[int] = vocab_size a : Union[str, Any] = hidden_size a : int = projection_dim a : int = num_hidden_layers a : Dict = num_attention_heads a : Any = intermediate_size a : str = dropout a : List[Any] = attention_dropout a : Optional[int] = max_position_embeddings a : Any = initializer_range a : List[Any] = scope a : Tuple = bos_token_id def __snake_case ( self : int): a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a : Dict = None if self.use_input_mask: a : int = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: a : Any = input_mask.numpy() a 
, a : Tuple = input_mask.shape a : int = np.random.randint(1 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(__UpperCAmelCase): a : int = 1 a : List[Any] = 0 a : Optional[Any] = self.get_config() return config, input_ids, tf.convert_to_tensor(__UpperCAmelCase) def __snake_case ( self : Tuple): return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any]): a : Optional[Any] = TFBlipTextModel(config=__UpperCAmelCase) a : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , training=__UpperCAmelCase) a : Optional[Any] = model(__UpperCAmelCase , training=__UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def __snake_case ( self : Optional[int]): a : Optional[int] = self.prepare_config_and_inputs() a , a , a : str = config_and_inputs a : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : int = (TFBlipTextModel,) if is_tf_available() else () UpperCAmelCase : str = False UpperCAmelCase : List[Any] = False UpperCAmelCase : Optional[Any] = False def __snake_case ( self : Dict): a : Any = BlipTextModelTester(self) a : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37) def __snake_case ( 
self : int): self.config_tester.run_common_tests() def __snake_case ( self : List[Any]): a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) def __snake_case ( self : int): pass def __snake_case ( self : Dict): pass @unittest.skip(reason="Blip does not use inputs_embeds") def __snake_case ( self : List[str]): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def __snake_case ( self : List[Any]): pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING") def __snake_case ( self : Dict): pass @slow def __snake_case ( self : str): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : str = TFBlipTextModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : int=True): super().test_pt_tf_model_equivalence(allow_missing_keys=__UpperCAmelCase)
40
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """sew-d""" def __init__( self : Optional[int] , __UpperCAmelCase : Any=32 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Tuple=12 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : str=3072 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=512 , __UpperCAmelCase : int=256 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : int=True , __UpperCAmelCase : List[Any]=("p2c", "c2p") , __UpperCAmelCase : Any="layer_norm" , __UpperCAmelCase : Optional[Any]="gelu_python" , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : List[Any]=1e-7 , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : Dict="group" , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Optional[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Union[str, Any]=128 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : int=0.05 , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Optional[Any]="mean" , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Dict=False , __UpperCAmelCase : 
List[Any]=256 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : List[str]=1 , __UpperCAmelCase : List[Any]=2 , **__UpperCAmelCase : str , ): super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase) a : Tuple = hidden_size a : Optional[Any] = feat_extract_norm a : List[str] = feat_extract_activation a : str = list(__UpperCAmelCase) a : Dict = list(__UpperCAmelCase) a : str = list(__UpperCAmelCase) a : int = conv_bias a : Optional[int] = num_conv_pos_embeddings a : Union[str, Any] = num_conv_pos_embedding_groups a : str = len(self.conv_dim) a : int = num_hidden_layers a : Dict = intermediate_size a : Optional[Any] = squeeze_factor a : Union[str, Any] = max_position_embeddings a : Any = position_buckets a : Tuple = share_att_key a : str = relative_attention a : Tuple = norm_rel_ebd a : Tuple = list(__UpperCAmelCase) a : Tuple = hidden_act a : List[str] = num_attention_heads a : Optional[int] = hidden_dropout a : int = attention_dropout a : Dict = activation_dropout a : str = feat_proj_dropout a : Any = final_dropout a : Optional[Any] = layer_norm_eps a : List[Any] = feature_layer_norm_eps a : str = initializer_range a : Dict = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 a : str = apply_spec_augment a : int = mask_time_prob a : Union[str, Any] = mask_time_length a : Any = mask_time_min_masks a : Tuple = mask_feature_prob a : List[Any] = mask_feature_length a : Optional[Any] = mask_feature_min_masks # ctc loss a : Tuple = ctc_loss_reduction a : str = ctc_zero_infinity # sequence classification a : int = use_weighted_layer_sum a : Any = classifier_proj_size @property def __snake_case ( self : Optional[Any]): return functools.reduce(operator.mul , self.conv_stride , 1)
40
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
1
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Tuple , __UpperCAmelCase : Any): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]): a : Dict = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(__UpperCAmelCase) def __snake_case ( self : Optional[Any]): a : Union[str, Any] = "sshleifer/tiny-gpt2" a : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : Union[str, Any] = PyTorchBenchmark(__UpperCAmelCase) a : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def __snake_case ( self : Optional[int]): a : List[str] = "sgugger/tiny-distilbert-classification" a : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , ) a : Optional[int] = PyTorchBenchmark(__UpperCAmelCase) a : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def __snake_case ( self : Any): a : List[str] = "sshleifer/tiny-gpt2" a : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , torchscript=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : 
str = PyTorchBenchmark(__UpperCAmelCase) a : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision") def __snake_case ( self : str): a : Any = "sshleifer/tiny-gpt2" a : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , fpaa=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : Any = PyTorchBenchmark(__UpperCAmelCase) a : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def __snake_case ( self : Tuple): a : Any = "sshleifer/tiny-gpt2" a : Any = AutoConfig.from_pretrained(__UpperCAmelCase) # set architectures equal to `None` a : List[Any] = None a : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : List[Any] = PyTorchBenchmark(__UpperCAmelCase , configs=[config]) a : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def __snake_case ( self : List[str]): a : Tuple = "sshleifer/tiny-gpt2" a : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : Dict = PyTorchBenchmark(__UpperCAmelCase) a : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision") def __snake_case ( self : Optional[Any]): a : Any = "sshleifer/tiny-gpt2" a : 
List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCAmelCase , multi_process=__UpperCAmelCase , ) a : str = PyTorchBenchmark(__UpperCAmelCase) a : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def __snake_case ( self : int): a : Optional[Any] = "sshleifer/tiny-gpt2" a : str = AutoConfig.from_pretrained(__UpperCAmelCase) a : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : int = PyTorchBenchmark(__UpperCAmelCase , configs=[config]) a : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def __snake_case ( self : Any): a : str = "sshleifer/tinier_bart" a : Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase) a : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : List[str] = PyTorchBenchmark(__UpperCAmelCase , configs=[config]) a : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def __snake_case ( self : Optional[Any]): a : Dict = "sshleifer/tiny-gpt2" a : Dict = AutoConfig.from_pretrained(__UpperCAmelCase) a : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : int = PyTorchBenchmark(__UpperCAmelCase , configs=[config]) a : List[str] = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def __snake_case ( self : Optional[int]): a : Any = "sshleifer/tinier_bart" a : Optional[Any] = AutoConfig.from_pretrained(__UpperCAmelCase) a : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , ) a : Dict = PyTorchBenchmark(__UpperCAmelCase , configs=[config]) a : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def __snake_case ( self : int): a : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: a : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__UpperCAmelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__UpperCAmelCase , "train_time.csv") , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv") , multi_process=__UpperCAmelCase , ) a : List[str] = PyTorchBenchmark(__UpperCAmelCase) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(__UpperCAmelCase , "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__UpperCAmelCase , "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv")).exists()) def __snake_case ( self : int): a : Union[str, Any] = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(__UpperCAmelCase : str): 
self.assertTrue(hasattr(__UpperCAmelCase , "sequential")) self.assertTrue(hasattr(__UpperCAmelCase , "cumulative")) self.assertTrue(hasattr(__UpperCAmelCase , "current")) self.assertTrue(hasattr(__UpperCAmelCase , "total")) with tempfile.TemporaryDirectory() as tmp_dir: a : Tuple = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt") , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , multi_process=__UpperCAmelCase , ) a : Optional[Any] = PyTorchBenchmark(__UpperCAmelCase) a : int = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt")).exists())
40
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
1
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be trained."""} ) UpperCAmelCase : Optional[str] = field( default="""./""" ,metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path of training dataset."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} ) UpperCAmelCase : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size for training."""} ) UpperCAmelCase : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size for evaluation."""} ) UpperCAmelCase : Optional[float] = field(default=0.1 ,metadata={"""help""": """Value of weight decay."""} ) UpperCAmelCase : Optional[int] = field( default=1_0_0_0_0 ,metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} ) UpperCAmelCase : Optional[float] = field(default=2E-4 ,metadata={"""help""": """Learning rate fo training."""} ) UpperCAmelCase : Optional[str] = field(default="""cosine""" ,metadata={"""help""": """Learning rate."""} ) UpperCAmelCase : Optional[int] = field( default=7_5_0 ,metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} ) UpperCAmelCase : Optional[int] = field( default=1_6 ,metadata={"""help""": """Number of gradient accumulation steps."""} ) UpperCAmelCase : Optional[bool] = field( default=_a ,metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} ) UpperCAmelCase : Optional[int] = field(default=5_0_0_0_0 ,metadata={"""help""": """Maximum number of training steps."""} ) UpperCAmelCase : Optional[int] = 
field( default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} ) UpperCAmelCase : Optional[int] = field(default=1_0_2_4 ,metadata={"""help""": """Sequence lengths used for training."""} ) UpperCAmelCase : Optional[int] = field(default=1 ,metadata={"""help""": """Training seed."""} ) UpperCAmelCase : Optional[int] = field( default=1_0_2_4 ,metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} ,) UpperCAmelCase : Optional[str] = field( default=_a ,metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} ) UpperCAmelCase : Optional[bool] = field(default=_a ,metadata={"""help""": """If True the data is pretokenized."""} ) @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} ) UpperCAmelCase : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size used for evaluation."""} ) UpperCAmelCase : Optional[int] = field( default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. 
If -1 the full dataset is evaluated."""} ) UpperCAmelCase : Optional[int] = field(default=1_0_2_4 ,metadata={"""help""": """Length of sequences to be evaluated."""} ) UpperCAmelCase : Optional[int] = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} ) @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} ) UpperCAmelCase : Optional[int] = field(default=_a ,metadata={"""help""": """Number of workers used for code evaluation."""} ) UpperCAmelCase : Optional[int] = field( default=_a ,metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} ,) UpperCAmelCase : Optional[bool] = field( default=_a ,metadata={"""help""": """Sample from the language model's output distribution."""} ) UpperCAmelCase : Optional[float] = field(default=0.2 ,metadata={"""help""": """Sampling temperature used for generation."""} ) UpperCAmelCase : Optional[int] = field(default=2_5_6 ,metadata={"""help""": """Maximum number of newly generated tokens."""} ) UpperCAmelCase : Optional[int] = field(default=0 ,metadata={"""help""": """Top-k parameter used for generation."""} ) UpperCAmelCase : Optional[float] = field(default=0.95 ,metadata={"""help""": """Top-p parameter used for nucleus sampling."""} ) UpperCAmelCase : Optional[int] = field(default=1_0 ,metadata={"""help""": """Number of generations to run in parallel."""} ) UpperCAmelCase : Optional[int] = field( default=2_0_0 ,metadata={"""help""": """Number of completions to generate for each sample."""} ) UpperCAmelCase : Optional[int] = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} ) UpperCAmelCase : Optional[str] = field( default="""eval_results.json""" ,metadata={"""help""": """Random seed used for evaluation."""} ) UpperCAmelCase : Optional[str] = field( default="""0""" ,metadata={"""help""": 
"""Allow `code_eval` to execute Python code on machine"""} ) UpperCAmelCase : Optional[int] = field( default=-1 ,metadata={ """help""": ( """Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive""" """ number corresponds to which GPU device id to run on.""" ) } ,) @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[int] = field( default=_a ,metadata={ """help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.""" } ,) UpperCAmelCase : Optional[str] = field( default="""transformersbook/codeparrot""" ,metadata={"""help""": """Folder or name of dataset to process."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot-clean""" ,metadata={"""help""": """Folder to save processed processed dataset."""} ) UpperCAmelCase : Optional[int] = field( default=1_0_0_0_0_0 ,metadata={"""help""": """Number of files to save per JSON output file."""} ) UpperCAmelCase : Optional[str] = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} ) UpperCAmelCase : Optional[float] = field( default=1_0_0_0 ,metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} ) UpperCAmelCase : Optional[float] = field( default=1_0_0 ,metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} ) UpperCAmelCase : Optional[float] = field( default=0.25 ,metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} ) UpperCAmelCase : Optional[float] = field( default=1.5 ,metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} ) UpperCAmelCase : Optional[float] = field( default=0.7 ,metadata={"""help""": """Probability for filtering config, test and uncommon files."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the 
tokenizer."""} ,) UpperCAmelCase : Optional[bool] = field( default=_a ,metadata={"""help""": """If True, near-duplicate samples are removed."""} ) UpperCAmelCase : Optional[float] = field( default=0.85 ,metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} ) @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[str] = field( default="""gpt2""" ,metadata={"""help""": """Base tokenizer to build new tokenizer from."""} ) UpperCAmelCase : Optional[str] = field( default="""transformersbook/codeparrot-train""" ,metadata={"""help""": """Dataset to train tokenizer on."""} ) UpperCAmelCase : Optional[str] = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} ) UpperCAmelCase : Optional[int] = field(default=2_0_0_0_0_0 ,metadata={"""help""": """Number of examples to train tokenizer on."""} ) UpperCAmelCase : Optional[int] = field( default=3_2_7_6_8 ,metadata={"""help""": """Number of examples to train the tokenizer on."""} ) UpperCAmelCase : Optional[str] = field(default="""codeparrot""" ,metadata={"""help""": """Name of new tokenizer."""} ) UpperCAmelCase : Optional[bool] = field(default=_a ,metadata={"""help""": """Push saved tokenizer to the hub."""} ) @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path to the dataset to pretokenize."""} ) UpperCAmelCase : Optional[str] = field( default="""tokenized-codeparrot-train""" ,metadata={"""help""": """Repo name of the pretokenized data."""} ) UpperCAmelCase : Optional[int] = field(default=_a ,metadata={"""help""": """Number of workers used for code evaluation."""} ) @dataclass class _A : """simple docstring""" UpperCAmelCase : Optional[str] = field( default="""gpt2-large""" 
,metadata={"""help""": """Configuration to use for model initialization."""} ) UpperCAmelCase : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Tokenizer attached to model."""} ) UpperCAmelCase : Optional[str] = field(default="""codeparrot""" ,metadata={"""help""": """Name of the created model."""} ) UpperCAmelCase : Optional[bool] = field(default=_a ,metadata={"""help""": """Push saved tokenizer to the hub."""} )
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
1
"""Speech-to-text tool wrapping Whisper for the Transformers agent framework."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class _A(PipelineTool):
    """Tool that transcribes an audio input into text using Whisper.

    NOTE(review): the mangled original subclassed an undefined name `_a`, bound
    every class attribute to the same identifier (so only the last assignment
    survived) and gave all three methods one name (so `encode`/`forward` were
    shadowed).  Restored to the attribute and method names the `PipelineTool`
    protocol reads — TODO confirm against `transformers.tools.base.PipelineTool`.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn raw audio into Whisper log-mel input features (a torch tensor)."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run autoregressive generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated token ids into the single transcribed string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
40
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() 
model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, 
-8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
1
"""Deprecated SageMaker-specific Trainer shim; use `Trainer` directly."""
import warnings

from ..trainer import Trainer
from ..utils import logging


__lowercase = logging.get_logger(__name__)


class _A(Trainer):
    """Thin deprecation wrapper around :class:`Trainer`.

    NOTE(review): the mangled original subclassed an undefined name `_a`, reused
    one identifier for both the positional and the ``**`` parameter (a
    SyntaxError), and passed the argument value as the warning *category*.
    Restored to the conventional deprecation pattern.
    """

    def __init__(self, args=None, **kwargs):
        # Emit the deprecation notice, then defer everything to Trainer.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
40
"""Lazy-import package init for the RAG model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Submodule -> public symbols.  Backend-specific entries are appended below
# only when the corresponding framework is installed.
# NOTE(review): the mangled original bound this dict, both model lists and the
# _LazyModule instance to one clobbering name and then referenced an undefined
# `_import_structure`; restored the canonical lazy-init pattern.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy framework imports happen
    # only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
1
"""Add two non-negative integers using only bitwise operations."""


def add(first: int, second: int) -> int:
    """Return ``first + second`` computed with AND/XOR/shift only.

    Classic carry propagation: AND yields the carry bits, XOR the partial
    sum; the carry is shifted left and the step repeats until no carry
    remains.

    NOTE(review): the mangled original reused one parameter name (a
    SyntaxError), referenced an undefined ``c`` and never wrote the carry
    back, so the loop could not terminate.  Also note: Python ints are
    unbounded, so a negative operand would never terminate — assumes
    non-negative inputs.

    >>> add(3, 5)
    8
    """
    while second != 0:
        carry = first & second  # bits that overflow in this digit position
        first ^= second         # sum without the carry
        second = carry << 1     # propagate the carry one position left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
1
"""simple docstring""" import argparse import os import re import packaging.version __lowercase = """examples/""" __lowercase = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } __lowercase = { """init""": """src/diffusers/__init__.py""", """setup""": """setup.py""", } __lowercase = """README.md""" def lowercase ( A_ , A_ , A_ )-> Dict: '''simple docstring''' with open(A_ , "r" , encoding="utf-8" , newline="\n" ) as f: a : str = f.read() a , a : Union[str, Any] = REPLACE_PATTERNS[pattern] a : Optional[Any] = replace.replace("VERSION" , A_ ) a : Tuple = re_pattern.sub(A_ , A_ ) with open(A_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.write(A_ ) def lowercase ( A_ )-> Optional[Any]: '''simple docstring''' for folder, directories, fnames in os.walk(A_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(A_ , A_ ) , A_ , pattern="examples" ) def lowercase ( A_ , A_=False )-> List[Any]: '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(A_ , A_ , A_ ) if not patch: update_version_in_examples(A_ ) def lowercase ( )-> Tuple: '''simple docstring''' a : str = "🤗 Transformers currently provides the following architectures" a : List[str] = "1. Want to contribute a new model?" 
with open(A_ , "r" , encoding="utf-8" , newline="\n" ) as f: a : Optional[int] = f.readlines() # Find the start of the list. a : int = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 a : List[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): a : Optional[Any] = lines[index].replace( "https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , ) index += 1 with open(A_ , "w" , encoding="utf-8" , newline="\n" ) as f: f.writelines(A_ ) def lowercase ( )-> int: '''simple docstring''' with open(REPLACE_FILES["init"] , "r" ) as f: a : Any = f.read() a : str = REPLACE_PATTERNS["init"][0].search(A_ ).groups()[0] return packaging.version.parse(A_ ) def lowercase ( A_=False )-> Optional[Any]: '''simple docstring''' a : int = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: a : Dict = default_version.base_version elif patch: a : Any = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: a : int = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. a : List[Any] = input(F'''Which version are you releasing? [{default_version}]''' ) if len(A_ ) == 0: a : Dict = default_version print(F'''Updating version to {version}.''' ) global_version_update(A_ , patch=A_ ) def lowercase ( )-> Union[str, Any]: '''simple docstring''' a : Dict = get_version() a : Union[str, Any] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' a : Optional[int] = current_version.base_version # Check with the user we got that right. a : Any = input(F'''Which version are we developing now? 
[{dev_version}]''' ) if len(A_ ) == 0: a : List[Any] = dev_version print(F'''Updating version to {version}.''' ) global_version_update(A_ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") __lowercase = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
40
"""Check whether a string is an isogram (no repeated letters)."""


def is_isogram(string: str) -> bool:
    """Return True if *string* contains no repeated letters, case-insensitively.

    NOTE(review): the mangled original read an undefined name ``string`` while
    its parameter was called ``A_``, and the module-level caller below invoked
    an undefined ``is_isogram``; names restored to match the call site.

    Raises:
        ValueError: if the string contains any non-alphabetic character.

    >>> is_isogram("Uncopyrightable")
    True
    >>> is_isogram("allowance")
    False
    """
    if not all(ch.isalpha() for ch in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    # A string is an isogram iff deduplication removes nothing.
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    # Guarded so importing the module no longer blocks on stdin.
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
40
1
"""Find a Hamiltonian cycle in an undirected graph via backtracking.

NOTE(review): the mangled original named all three functions ``lowercase``
while the recursive call sites used ``valid_connection`` /
``util_hamilton_cycle``, and the last def repeated a parameter name (a
SyntaxError); names restored to match the call sites.
"""


def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if *next_ver* may be placed at position *curr_ind* of *path*."""
    # 1. The previous vertex on the path must be adjacent to the candidate.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. The candidate must not already be on the path.
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to complete *path* from position *curr_ind*.

    Mutates *path* in place; returns True once a full cycle is found.
    """
    if curr_ind == len(graph):
        # All vertices placed: a cycle exists iff the last vertex connects
        # back to the starting one.
        return graph[path[curr_ind - 1]][path[0]] == 1
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            path[curr_ind] = next_ver
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            path[curr_ind] = -1  # backtrack
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle as a vertex list (start repeated at the
    end), or an empty list when no such cycle exists.

    *graph* is an adjacency matrix of 0/1 entries.
    """
    # Slot 0 and the final slot both hold the start vertex; the rest begin
    # unassigned (-1).
    path = [-1] * (len(graph) + 1)
    path[0] = path[-1] = start_index
    return path if util_hamilton_cycle(graph, path, 1) else []
40
"""simple docstring""" import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowercase = datasets.utils.logging.get_logger(__name__) @dataclass class _A ( datasets.BuilderConfig ): """simple docstring""" UpperCAmelCase : int = 1_0_0_0_0 UpperCAmelCase : Optional[List[str]] = None UpperCAmelCase : Optional[datasets.Features] = None class _A ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCAmelCase : str = ParquetConfig def __snake_case ( self : Tuple): return datasets.DatasetInfo(features=self.config.features) def __snake_case ( self : List[Any] , __UpperCAmelCase : str): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''') a : str = dl_manager.download_and_extract(self.config.data_files) if isinstance(__UpperCAmelCase , (str, list, tuple)): a : Dict = data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})] a : Dict = [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__UpperCAmelCase): with open(__UpperCAmelCase , "rb") as f: a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase)) break splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files})) return splits def 
__snake_case ( self : List[str] , __UpperCAmelCase : pa.Table): if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema) return pa_table def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int): a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema) != sorted(self.config.columns): raise ValueError( f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''') for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)): with open(__UpperCAmelCase , "rb") as f: a : Tuple = pq.ParquetFile(__UpperCAmelCase) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)): a : Optional[Any] = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''') raise
40
1
"""simple docstring""" from __future__ import annotations import math def lowercase ( A_ , A_ )-> list: '''simple docstring''' if len(A_ ) != 2 or len(a[0] ) != 2 or len(A_ ) != 2 or len(b[0] ) != 2: raise Exception("Matrices are not 2x2" ) a : Dict = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def lowercase ( A_ , A_ )-> Optional[int]: '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(A_ ) ) ] def lowercase ( A_ , A_ )-> Tuple: '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(A_ ) ) ] def lowercase ( A_ )-> tuple[list, list, list, list]: '''simple docstring''' if len(A_ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("Odd matrices are not supported!" ) a : Optional[int] = len(A_ ) a : str = matrix_length // 2 a : Dict = [[a[i][j] for j in range(A_ , A_ )] for i in range(A_ )] a : Optional[Any] = [ [a[i][j] for j in range(A_ , A_ )] for i in range(A_ , A_ ) ] a : Dict = [[a[i][j] for j in range(A_ )] for i in range(A_ )] a : Optional[Any] = [[a[i][j] for j in range(A_ )] for i in range(A_ , A_ )] return top_left, top_right, bot_left, bot_right def lowercase ( A_ )-> tuple[int, int]: '''simple docstring''' return len(A_ ), len(matrix[0] ) def lowercase ( A_ )-> None: '''simple docstring''' print("\n".join(str(A_ ) for line in matrix ) ) def lowercase ( A_ , A_ )-> list: '''simple docstring''' if matrix_dimensions(A_ ) == (2, 2): return default_matrix_multiplication(A_ , A_ ) a , a , a , a : List[str] = split_matrix(A_ ) a , a , a , a : Any = split_matrix(A_ ) a : List[str] = actual_strassen(A_ , matrix_subtraction(A_ , A_ ) ) a : Optional[Any] = actual_strassen(matrix_addition(A_ , A_ ) , A_ ) a : int = actual_strassen(matrix_addition(A_ , A_ ) , A_ ) a : 
List[Any] = actual_strassen(A_ , matrix_subtraction(A_ , A_ ) ) a : List[str] = actual_strassen(matrix_addition(A_ , A_ ) , matrix_addition(A_ , A_ ) ) a : Dict = actual_strassen(matrix_subtraction(A_ , A_ ) , matrix_addition(A_ , A_ ) ) a : str = actual_strassen(matrix_subtraction(A_ , A_ ) , matrix_addition(A_ , A_ ) ) a : Optional[Any] = matrix_addition(matrix_subtraction(matrix_addition(A_ , A_ ) , A_ ) , A_ ) a : Union[str, Any] = matrix_addition(A_ , A_ ) a : Optional[Any] = matrix_addition(A_ , A_ ) a : Any = matrix_subtraction(matrix_subtraction(matrix_addition(A_ , A_ ) , A_ ) , A_ ) # construct the new matrix from our 4 quadrants a : Optional[Any] = [] for i in range(len(A_ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(A_ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def lowercase ( A_ , A_ )-> list: '''simple docstring''' if matrix_dimensions(A_ )[1] != matrix_dimensions(A_ )[0]: a : str = ( "Unable to multiply these matrices, please check the dimensions.\n" F'''Matrix A: {matrixa}\n''' F'''Matrix B: {matrixa}''' ) raise Exception(A_ ) a : Any = matrix_dimensions(A_ ) a : Optional[Any] = matrix_dimensions(A_ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] a : Optional[Any] = max(*A_ , *A_ ) a : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(A_ ) ) ) ) a : Union[str, Any] = matrixa a : Optional[int] = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , A_ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , A_ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , A_ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) a : Dict = actual_strassen(A_ , A_ ) # Removing the additional zeros for i in range(0 , A_ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , A_ ): final_matrix[i].pop() else: 
final_matrix.pop() return final_matrix if __name__ == "__main__": __lowercase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] __lowercase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
40
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a 
: Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
1
"""simple docstring""" def lowercase ( A_ = 10**12 )-> int: '''simple docstring''' a : List[str] = 1 a : Optional[Any] = 0 a : Any = 1 a : Optional[int] = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f'''{solution() = }''')
40
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __lowercase = """sshleifer/bart-tiny-random""" __lowercase = """patrickvonplaten/t5-tiny-random""" @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def __snake_case ( self : Any): return AutoConfig.from_pretrained(__UpperCAmelCase) def __snake_case ( self : str): a , *a : Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.num_hidden_layers , 1) def __snake_case ( self : List[str]): a , *a : Union[str, Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase) def __snake_case ( self : Union[str, Any]): a , *a : Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers) def __snake_case ( self : List[str]): a , *a : Union[str, Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , 1) def __snake_case ( self : List[Any]): with self.assertRaises(__UpperCAmelCase): create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase)
40
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class _A ( _a ): """simple docstring""" UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] UpperCAmelCase : Optional[List[bool]] UpperCAmelCase : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
40
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class _A ( _a ): """simple docstring""" UpperCAmelCase : Union[str, Any] = """vit_msn""" def __init__( self : List[str] , __UpperCAmelCase : Optional[int]=768 , __UpperCAmelCase : Optional[int]=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : int=3072 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : int=1e-06 , __UpperCAmelCase : int=224 , __UpperCAmelCase : List[str]=16 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Dict=True , **__UpperCAmelCase : str , ): super().__init__(**__UpperCAmelCase) a : Union[str, Any] = hidden_size a : Dict = num_hidden_layers a : List[Any] = num_attention_heads a : Any = intermediate_size a : Union[str, Any] = hidden_act a : Optional[int] = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : Optional[Any] = initializer_range a : List[str] = layer_norm_eps a : List[Any] = image_size a : Optional[Any] = patch_size a : Any = num_channels a : Optional[Any] = qkv_bias
40
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _A ( _a ): """simple docstring""" UpperCAmelCase : Tuple = """gpt_neo""" UpperCAmelCase : str = ["""past_key_values"""] UpperCAmelCase : Optional[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any]=50257 , __UpperCAmelCase : int=2048 , __UpperCAmelCase : List[Any]=2048 , __UpperCAmelCase : int=24 , __UpperCAmelCase : Optional[Any]=[[["global", "local"], 12]] , __UpperCAmelCase : Optional[int]=16 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=256 , __UpperCAmelCase : Union[str, Any]="gelu_new" , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : str=1e-5 , __UpperCAmelCase : List[Any]=0.02 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : List[Any]=50256 , __UpperCAmelCase : Optional[Any]=50256 , **__UpperCAmelCase : List[str] , ): a : List[Any] = vocab_size a : Optional[int] = max_position_embeddings a : Tuple = hidden_size a : Optional[Any] = num_layers a : Optional[int] = num_heads a : Optional[int] = intermediate_size a : List[str] = window_size a : Union[str, Any] = activation_function a : Union[str, Any] = resid_dropout a : List[Any] = embed_dropout a : Any = attention_dropout a : List[str] = classifier_dropout a : Any = layer_norm_epsilon a : Union[str, Any] = initializer_range a : Dict = use_cache a : 
Any = bos_token_id a : List[str] = eos_token_id a : Any = attention_types a : Dict = self.expand_attention_types_params(__UpperCAmelCase) if len(self.attention_layers) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument.") super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase) @staticmethod def __snake_case ( __UpperCAmelCase : Tuple): a : List[str] = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def lowercase ( A_ , A_ , A_ , A_ )-> str: '''simple docstring''' import torch a : Dict = input.size() a : Any = len(A_ ) a : Optional[int] = shape[dimension] a : Optional[Any] = torch.arange(0 , A_ , A_ ) a : Tuple = torch.div(sizedim - size , A_ , rounding_mode="floor" ) + 1 a : Optional[Any] = torch.arange(A_ ) + low_indices[:min_length][:, None] a : List[str] = [slice(A_ )] * rank a : Union[str, Any] = indices a : str = input[s] a : int = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A_ ) def lowercase ( A_ , A_ )-> str: '''simple docstring''' import torch a : Tuple = torch.arange(1 , A_ ) a : str = torch.remainder(A_ , A_ ) a : Optional[Any] = remainders == 0 a : Tuple = candidates[divisor_indices] a : List[Any] = torch.max(A_ ) return largest_divisor, torch.div(A_ , A_ , rounding_mode="floor" ) class _A ( _a ): """simple docstring""" @property def __snake_case ( self : Tuple): a : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs") a : List[Any] = {0: 
"batch", 1: "past_sequence + sequence"} else: a : Union[str, Any] = {0: "batch", 1: "sequence"} return common_inputs @property def __snake_case ( self : Any): return self._config.num_heads def __snake_case ( self : Optional[Any] , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ): a : Optional[int] = super(__UpperCAmelCase , self).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase) # We need to order the input in the way they appears in the forward() a : Dict = OrderedDict({"input_ids": common_inputs["input_ids"]}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch a , a : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values a : Tuple = seqlen + 2 a : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) a : Union[str, Any] = [ (torch.zeros(__UpperCAmelCase), torch.zeros(__UpperCAmelCase)) for _ in range(self.num_layers) ] a : List[Any] = common_inputs["attention_mask"] if self.use_past: a : Optional[int] = ordered_inputs["attention_mask"].dtype a : Tuple = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase)] , dim=1) return ordered_inputs @property def __snake_case ( self : str): return 13
40
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str]=13 , __UpperCAmelCase : Optional[Any]=32 , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : str=3 , __UpperCAmelCase : int=16 , __UpperCAmelCase : int=[1, 2, 1] , __UpperCAmelCase : Union[str, Any]=[2, 2, 4] , __UpperCAmelCase : int=2 , __UpperCAmelCase : int=2.0 , __UpperCAmelCase : int=True , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : str=10 , __UpperCAmelCase : Tuple=8 , ): a : List[Any] = parent a : str = batch_size a : int = image_size a : Union[str, Any] = patch_size a : Dict = num_channels a : List[Any] = embed_dim a : Optional[int] = depths a : List[Any] = num_heads a : Dict = window_size a : Tuple = mlp_ratio a : Union[str, Any] = qkv_bias a : 
List[str] = hidden_dropout_prob a : List[Any] = attention_probs_dropout_prob a : Union[str, Any] = drop_path_rate a : Optional[int] = hidden_act a : Optional[Any] = use_absolute_embeddings a : Optional[int] = patch_norm a : Optional[Any] = layer_norm_eps a : Any = initializer_range a : Optional[Any] = is_training a : List[str] = scope a : int = use_labels a : Tuple = type_sequence_label_size a : int = encoder_stride def __snake_case ( self : Optional[int]): a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a : List[Any] = None if self.use_labels: a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) a : List[str] = self.get_config() return config, pixel_values, labels def __snake_case ( self : List[str]): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __snake_case ( self : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple): a : Tuple = SwinvaModel(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a : str = model(__UpperCAmelCase) a : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) a : str = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def __snake_case ( self : Any , 
__UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int): a : List[Any] = SwinvaForMaskedImageModeling(config=__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a : Optional[int] = model(__UpperCAmelCase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images a : str = 1 a : Optional[Any] = SwinvaForMaskedImageModeling(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) a : Optional[int] = model(__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size)) def __snake_case ( self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]): a : str = self.type_sequence_label_size a : Union[str, Any] = SwinvaForImageClassification(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() a : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __snake_case ( self : Optional[int]): a : Optional[Any] = self.prepare_config_and_inputs() a , a , a : Dict = config_and_inputs a : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _A ( _a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Any = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) UpperCAmelCase : Dict = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) UpperCAmelCase : List[str] = False UpperCAmelCase : Dict = False UpperCAmelCase : Dict = False UpperCAmelCase : Optional[int] = False def __snake_case ( self : Tuple): a : Optional[int] = SwinvaModelTester(self) a : Dict = ConfigTester(self , 
config_class=__UpperCAmelCase , embed_dim=37) def __snake_case ( self : int): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __snake_case ( self : Optional[Any]): a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def __snake_case ( self : List[Any]): pass @unittest.skip(reason="Swinv2 does not use inputs_embeds") def __snake_case ( self : Optional[int]): pass def __snake_case ( self : Tuple): a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : Tuple = model_class(__UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) a : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear)) def __snake_case ( self : str): a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a : str = model_class(__UpperCAmelCase) a : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic a : List[str] = [*signature.parameters.keys()] a : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase) def __snake_case ( self : int): a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common() a : Optional[int] = True for model_class in self.all_model_classes: a : str = True a : Tuple = False a : int = True a : Tuple = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() with torch.no_grad(): a : Dict = 
model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a : int = outputs.attentions a : List[Any] = len(self.model_tester.depths) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # check that output_attentions also work using config del inputs_dict["output_attentions"] a : Dict = True a : Union[str, Any] = config.window_size**2 a : List[str] = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() with torch.no_grad(): a : Dict = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a : Union[str, Any] = outputs.attentions self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) a : str = len(__UpperCAmelCase) # Check attention is always last and order is fine a : Union[str, Any] = True a : List[str] = True a : Tuple = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() with torch.no_grad(): a : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) if hasattr(self.model_tester , "num_hidden_states_types"): a : Union[str, Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states a : Any = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCAmelCase)) a : Any = outputs.attentions self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __snake_case ( self : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]): a : Union[str, Any] = model_class(__UpperCAmelCase) model.to(__UpperCAmelCase) model.eval() with torch.no_grad(): a : Any = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)) a : Optional[int] = outputs.hidden_states a : 
Optional[int] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) # Swinv2 has a different seq_length a : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) a : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) a : Optional[int] = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) a , a , a , a : str = reshaped_hidden_states[0].shape a : str = ( reshaped_hidden_states[0].view(__UpperCAmelCase , __UpperCAmelCase , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def __snake_case ( self : int): a , a : int = self.model_tester.prepare_config_and_inputs_for_common() a : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: a : str = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a : Optional[Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : Any): a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() a : Dict = 3 a : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) a : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size , 
collections.abc.Iterable) else (config.patch_size, config.patch_size) ) a : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) a : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: a : Tuple = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a : Union[str, Any] = True self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width)) def __snake_case ( self : int): a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase) def __snake_case ( self : List[Any]): a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase) @slow def __snake_case ( self : List[str]): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = SwinvaModel.from_pretrained(__UpperCAmelCase) self.assertIsNotNone(__UpperCAmelCase) def __snake_case ( self : str): a , a : Any = self.model_tester.prepare_config_and_inputs_for_common() a : Optional[Any] = _config_zero_init(__UpperCAmelCase) for model_class in self.all_model_classes: a : int = model_class(config=__UpperCAmelCase) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def __snake_case ( self : int): return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256") if 
is_vision_available() else None ) @slow def __snake_case ( self : Dict): a : str = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to( __UpperCAmelCase) a : Union[str, Any] = self.default_image_processor a : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") a : str = image_processor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase) # forward pass with torch.no_grad(): a : Any = model(**__UpperCAmelCase) # verify the logits a : str = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape , __UpperCAmelCase) a : Dict = torch.tensor([-0.3_947, -0.4_306, 0.0_026]).to(__UpperCAmelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
40
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
1
"""Project Euler problem 43: sum all 0-9 pandigital numbers having the
sub-string divisibility property (https://projecteuler.net/problem=43)."""
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple ``num`` passes all seven
    sub-string divisibility tests of problem 43.

    :param num: tuple of 10 single digits (d1..d10)
    """
    # d2d3d4 divisible by 2  <=>  d4 is even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3  <=>  digit sum divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5  <=>  d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    # Remaining three-digit windows must be divisible by 7, 11, 13, 17.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum every pandigital permutation of the digits ``0..n-1`` that
    satisfies :func:`is_substring_divisible`.

    :param n: number of digits to permute (10 for the original problem)
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
40
"""Calculate the kinetic energy of a moving body."""


def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy (0.5 * m * v^2) of an object.

    :param mass: mass of the body; must be non-negative
    :param velocity: velocity of the body; its sign does not matter
    :raises ValueError: if ``mass`` is negative

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(2, -2)
    4.0
    >>> kinetic_energy(0, 10)
    0.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    # abs() keeps the product non-negative for negative velocities; for real
    # numbers abs(v) * abs(v) equals v squared.
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
40
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer __lowercase = logging.get_logger(__name__) __lowercase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} __lowercase = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": 
"""https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } __lowercase = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class _A ( _a ): """simple docstring""" UpperCAmelCase : str = VOCAB_FILES_NAMES UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Any = ["""input_ids""", """attention_mask"""] UpperCAmelCase : int = RobertaTokenizer def __init__( self : Tuple , __UpperCAmelCase : Any=None , __UpperCAmelCase : str=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : List[Any]="replace" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : str="<s>" , __UpperCAmelCase : Dict="<unk>" , __UpperCAmelCase : Dict="<pad>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : int=False , __UpperCAmelCase : Dict=True , **__UpperCAmelCase : Dict , ): super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , ) a : Union[str, Any] = 
json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase) != add_prefix_space: a : Optional[Any] = getattr(__UpperCAmelCase , pre_tok_state.pop("type")) a : Optional[int] = add_prefix_space a : Dict = pre_tok_class(**__UpperCAmelCase) a : Tuple = add_prefix_space a : str = "post_processor" a : Dict = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase) if tokenizer_component_instance: a : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a : int = tuple(state["sep"]) if "cls" in state: a : Any = tuple(state["cls"]) a : Dict = False if state.get("add_prefix_space" , __UpperCAmelCase) != add_prefix_space: a : Optional[int] = add_prefix_space a : int = True if state.get("trim_offsets" , __UpperCAmelCase) != trim_offsets: a : List[str] = trim_offsets a : List[str] = True if changes_to_apply: a : str = getattr(__UpperCAmelCase , state.pop("type")) a : Optional[int] = component_class(**__UpperCAmelCase) setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase) @property def __snake_case ( self : List[Any]): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet.") return None return str(self._mask_token) @mask_token.setter def __snake_case ( self : int , __UpperCAmelCase : Any): a : Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else value a : str = value def __snake_case ( self : Dict , *__UpperCAmelCase : int , **__UpperCAmelCase : Tuple): a : Tuple = kwargs.get("is_split_into_words" , __UpperCAmelCase) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Optional[int] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : List[Any]): a : Dict = kwargs.get("is_split_into_words" , __UpperCAmelCase) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None): a : str = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase) return tuple(__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Dict=None): a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __snake_case ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): a : Tuple = [self.sep_token_id] a : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
40
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
1
"""Lazy import structure for the EnCodec model (Hugging Face `_LazyModule`
pattern: heavy submodules are only imported on first attribute access)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply omit the modeling entries from the lazy structure.
    pass
else:
    # Extend (not replace) the structure with the torch-backed modeling module.
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
40
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
1
"""Download a WMT translation dataset and dump each split as line-aligned
``.source`` / ``.target`` text files (summarization-style layout)."""
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download ``dataset`` for the ``src_lang``-``tgt_lang`` pair and save
    every split as plain-text source/target files.

    :param src_lang: source language code
    :param tgt_lang: target language code
    :param dataset: HuggingFace datasets identifier (e.g. ``wmt16``)
    :param save_dir: output directory; defaults to ``{dataset}-{pair}``
    :raises ImportError: if the ``datasets`` package is not installed
    """
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        # Context managers guarantee the handles are flushed and closed even on
        # error (the previous version opened them and never closed them).
        with src_path.open("w+") as src_fp, tgt_path.open("w+") as tgt_fp:
            # reader is the bottleneck so writing one record at a time doesn't slow things down
            for x in tqdm(ds[split]):
                ex = x["translation"]
                src_fp.write(ex[src_lang] + "\n")
                tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
40
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = XGLMTokenizer UpperCAmelCase : Dict = XGLMTokenizerFast UpperCAmelCase : Optional[Any] = True UpperCAmelCase : Any = True def __snake_case ( self : int): super().setUp() # We have a SentencePiece fixture for testing a : Optional[int] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def __snake_case ( self : str): a : str = "<pad>" a : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : List[str]): a : int = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<s>") self.assertEqual(vocab_keys[1] , "<pad>") self.assertEqual(len(__UpperCAmelCase) , 1008) def __snake_case ( self : int): self.assertEqual(self.get_tokenizer().vocab_size , 1008) def __snake_case ( self : Union[str, Any]): a : Optional[int] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase) a : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a : str = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , 
[ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a : int = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a : int = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __snake_case ( self : str): return XGLMTokenizer.from_pretrained("facebook/xglm-564M") def __snake_case ( self : Optional[Any]): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name) a : List[str] = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase) a : str = pickle.dumps(__UpperCAmelCase) pickle.loads(__UpperCAmelCase) def __snake_case ( self : List[str]): if not self.test_rust_tokenizer: return a : Dict = self.get_tokenizer() a : Any = self.get_rust_tokenizer() a : Optional[Any] = "I was born in 92000, and this is falsé." 
a : Any = tokenizer.tokenize(__UpperCAmelCase) a : Optional[Any] = rust_tokenizer.tokenize(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) a : Optional[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) a : str = self.get_rust_tokenizer() a : int = tokenizer.encode(__UpperCAmelCase) a : str = rust_tokenizer.encode(__UpperCAmelCase) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase) @slow def __snake_case ( self : Optional[int]): a : Tuple = "Hello World!" a : Union[str, Any] = [2, 31227, 4447, 35] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase)) @slow def __snake_case ( self : Any): a : Optional[Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a : str = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase)) @slow def __snake_case ( self : Optional[Any]): # fmt: off a : List[Any] = { "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 
886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="facebook/xglm-564M" , padding=__UpperCAmelCase , )
40
"""Agent tool that answers questions about document images with a Donut
(VisionEncoderDecoderModel) checkpoint fine-tuned on DocVQA.

NOTE(review): local variable names in this file look like renaming
artifacts — values are assigned to ``a`` but later read through
descriptive names (e.g. ``task_prompt``); presumably the originals were
the descriptive names. Confirm against upstream before refactoring.
"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


# PIL is optional at import time; availability is re-checked in __init__.
if is_vision_available():
    from PIL import Image


class _A ( _a ):
    """Document question answering tool (PipelineTool subclass).

    Takes a document image and a question string, runs Donut-style
    constrained generation, and returns the extracted answer text.
    """

    # default checkpoint used when none is passed
    UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa"""
    # natural-language tool description shown to the agent
    UpperCAmelCase : Tuple = (
        """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    # tool name, processor/model classes, and declared input/output modalities
    UpperCAmelCase : List[str] = """document_qa"""
    UpperCAmelCase : str = AutoProcessor
    UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel
    UpperCAmelCase : int = ["""image""", """text"""]
    UpperCAmelCase : int = ["""text"""]

    def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any):
        # Fail fast if Pillow is missing — the tool cannot process images without it.
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)

    def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str):
        """Encode (image, question) into model inputs.

        Builds the DocVQA task prompt, tokenizes it as decoder input, and
        converts the image into pixel values.
        """
        # Donut task prompt; the question is substituted for {user_input}.
        a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase)
        a : Optional[Any] = self.pre_processor.tokenizer(
            __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids
        a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def __snake_case ( self : int , __UpperCAmelCase : int):
        """Run greedy generation (num_beams=1) conditioned on the prompt.

        The unk token is banned via bad_words_ids so the decoder never
        emits it; returns the generated token id sequences.
        """
        return self.model.generate(
            inputs["pixel_values"].to(self.device) ,
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=__UpperCAmelCase ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=__UpperCAmelCase ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=__UpperCAmelCase ,
        ).sequences

    def __snake_case ( self : str , __UpperCAmelCase : List[Any]):
        """Decode generated ids to text and extract the answer field.

        Strips eos/pad tokens and the leading task-start token, then parses
        the remaining XML-ish markup into a dict and returns its "answer".
        """
        a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0]
        a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "")
        a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "")
        a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip()  # remove first task start token
        a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase)
        return sequence["answer"]
40
1
"""Tests for the REALM retriever: builds a tiny WordPiece vocab and a
handful of byte-string block records, then checks retrieval, answer-span
tagging, and save/load round-trips.

NOTE(review): local variable names look like renaming artifacts — values
are assigned to ``a`` but read back through descriptive names
(``vocab_tokens``, ``retriever``, ...); confirm against upstream.
"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class _A ( _a ):
    """Test case exercising RealmRetriever with dummy fixtures."""

    def __snake_case ( self : Union[str, Any]):
        """setUp: create a temp dir with a toy vocab file and a block-records dir."""
        a : Any = tempfile.mkdtemp()
        a : str = 5  # number of dummy block records
        # Realm tok
        a : Dict = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a : Tuple = os.path.join(self.tmpdirname , "realm_tokenizer")
        os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase)
        a : Optional[Any] = os.path.join(__UpperCAmelCase , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        a : Dict = os.path.join(self.tmpdirname , "realm_block_records")
        os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase)

    def __snake_case ( self : int):
        """Return a RealmTokenizer built from the toy vocab written in setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer"))

    def __snake_case ( self : Dict):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname)

    def __snake_case ( self : List[Any]):
        """Return a RealmConfig sized to the dummy block records."""
        a : Tuple = RealmConfig(num_block_records=self.num_block_records)
        return config

    def __snake_case ( self : Dict):
        """Return a tiny two-row QA dataset."""
        a : Dict = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            })
        return dataset

    def __snake_case ( self : Any):
        """Return the dummy evidence blocks as a numpy array of byte strings."""
        a : List[Any] = np.array(
            [
                B"This is the first record",
                B"This is the second record",
                B"This is the third record",
                B"This is the fourth record",
                B"This is the fifth record",
                B"This is a longer longer longer record",
            ] ,
            dtype=__UpperCAmelCase ,
        )
        return block_records

    def __snake_case ( self : List[str]):
        """Return a RealmRetriever wired to the dummy blocks and tokenizer."""
        a : Union[str, Any] = RealmRetriever(
            block_records=self.get_dummy_block_records() ,
            tokenizer=self.get_tokenizer() ,
        )
        return retriever

    def __snake_case ( self : Optional[int]):
        """Retrieve two blocks and check the concatenated reader inputs."""
        a : Union[str, Any] = self.get_config()
        a : List[str] = self.get_dummy_retriever()
        a : Optional[Any] = retriever.tokenizer
        # retrieve blocks 0 ("first record") and 3 ("fourth record")
        a : int = np.array([0, 3] , dtype="long")
        a : List[str] = tokenizer(["Test question"]).input_ids
        a : Union[str, Any] = tokenizer(
            ["the fourth"] ,
            add_special_tokens=__UpperCAmelCase ,
            return_token_type_ids=__UpperCAmelCase ,
            return_attention_mask=__UpperCAmelCase ,
        ).input_ids
        a : List[str] = config.reader_seq_len
        a , a , a , a : Optional[Any] = retriever(
            __UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="np")
        self.assertEqual(len(__UpperCAmelCase) , 2)
        self.assertEqual(len(__UpperCAmelCase) , 2)
        self.assertEqual(len(__UpperCAmelCase) , 2)
        # question + evidence fit in (2, 10): [CLS] q [SEP] block [SEP]
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) ,
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] ,
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) ,
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] ,
        )

    def __snake_case ( self : str):
        """Check has-answer flags and answer start/end positions across three blocks."""
        a : Union[str, Any] = self.get_config()
        a : List[str] = self.get_dummy_retriever()
        a : str = retriever.tokenizer
        # retrieve blocks 0, 3 and 5 (the "longer longer longer" record)
        a : Any = np.array([0, 3, 5] , dtype="long")
        a : Dict = tokenizer(["Test question"]).input_ids
        a : List[Any] = tokenizer(
            ["the fourth", "longer longer"] ,
            add_special_tokens=__UpperCAmelCase ,
            return_token_type_ids=__UpperCAmelCase ,
            return_attention_mask=__UpperCAmelCase ,
        ).input_ids
        a : List[Any] = config.reader_seq_len
        a , a , a , a : Tuple = retriever(
            __UpperCAmelCase , __UpperCAmelCase , answer_ids=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="np")
        # block 0 contains no answer; blocks 3 and 5 do
        self.assertEqual([False, True, True] , __UpperCAmelCase)
        # -1 marks "no answer span" positions
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __UpperCAmelCase)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __UpperCAmelCase)

    def __snake_case ( self : Optional[Any]):
        """Round-trip save_pretrained/from_pretrained, locally and via a mocked hub download."""
        a : str = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records"))
        # Test local path
        a : Any = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records"))
        self.assertEqual(retriever.block_records[0] , B"This is the first record")
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            a : Dict = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records") , _REALM_BLOCK_RECORDS_FILENAME)
            a : Any = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
        self.assertEqual(retriever.block_records[0] , B"This is the first record")
40
"""Simple symmetric XOR cipher with string/char-list/file helpers."""
from __future__ import annotations

# Export the class despite its leading-underscore name so the documented
# usage (see the commented tests at the bottom) works via star imports.
__all__ = ["_A"]


class _A:
    """Symmetric XOR cipher.

    XOR is its own inverse, so encryption and decryption apply the exact
    same transformation; the paired methods exist for a readable API.
    Method names follow the usage documented in the commented examples at
    the bottom of the file (the previous text had every method collapsed
    onto one obfuscated name, so later definitions shadowed earlier ones
    and the constructor never stored the key).
    """

    def __init__(self, key: int = 0):
        """Store an optional instance-wide default key.

        :param key: default key used when a call passes ``key=0``;
                    0 itself means "no default" and falls back to 1.
        """
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt ``content`` and return the result as a list of characters.

        :param content: plaintext string
        :param key: XOR key; 0 falls back to the instance default, then 1
        """
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size (byte range)
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt a character list produced by :meth:`encrypt`."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size (byte range)
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Encrypt ``content`` and return the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size: reduce into byte range
        while key > 255:
            key -= 255
        return "".join(chr(ord(ch) ^ key) for ch in content)

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Decrypt a string produced by :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size: reduce into byte range
        while key > 255:
            key -= 255
        return "".join(chr(ord(ch) ^ key) for ch in content)

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt ``file`` line by line into ``encrypt.out``.

        :return: True on success, False if any file operation fails.
        """
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt ``file`` (as written by :meth:`encrypt_file`) into ``decrypt.out``.

        :return: True on success, False if any file operation fails.
        """
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
40
1
"""Donut-style processor combining an image processor and a tokenizer,
with a regex-based parser (``tokenajson``) that turns generated
``<s_key>...</s_key>`` markup into nested Python dicts/lists.

NOTE(review): local variable names look like renaming artifacts — values
are assigned to ``a`` but read back through descriptive names
(``kwargs``, ``inputs``, ``output`` ...); confirm against upstream.
"""
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class _A ( _a ):
    """Wraps an AutoImageProcessor and an AutoTokenizer behind one object."""

    UpperCAmelCase : List[str] = ["""image_processor""", """tokenizer"""]
    UpperCAmelCase : str = """AutoImageProcessor"""
    UpperCAmelCase : Optional[int] = """AutoTokenizer"""

    def __init__( self : str , __UpperCAmelCase : Any=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Dict):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, warning the caller.
        a : List[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." ,
                __UpperCAmelCase ,
            )
            a : Tuple = kwargs.pop("feature_extractor")
        a : Optional[int] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(__UpperCAmelCase , __UpperCAmelCase)
        # current_processor switches to the tokenizer inside as_target_processor()
        a : Any = self.image_processor
        a : Any = False

    def __call__( self : Optional[int] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : int):
        """Process images and/or text; returns image inputs with `labels`
        attached when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase)
        a : List[Any] = kwargs.pop("images" , __UpperCAmelCase)
        a : Dict = kwargs.pop("text" , __UpperCAmelCase)
        if len(__UpperCAmelCase) > 0:
            # positional form: first arg is images, the rest pass through
            a : Tuple = args[0]
            a : List[str] = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            a : List[Any] = self.image_processor(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase)
        if text is not None:
            a : List[str] = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # both given: tokenized text becomes the labels of the image batch
            a : Tuple = encodings["input_ids"]
            return inputs

    def __snake_case ( self : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[int]):
        """Forward batch_decode to the tokenizer."""
        return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase)

    def __snake_case ( self : str , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Dict):
        """Forward decode to the tokenizer."""
        return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase)

    @contextmanager
    def __snake_case ( self : List[str]):
        """Deprecated target-processor context: temporarily route __call__
        to the tokenizer for label processing."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        a : Dict = True
        a : Any = self.tokenizer
        yield
        # restore the image processor once the context exits
        a : Optional[Any] = self.image_processor
        a : List[Any] = False

    def __snake_case ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : str=None):
        """Convert generated ``<s_key>...</s_key>`` token markup into a
        nested dict/list structure.

        Recurses for nested keys, splits leaf lists on ``<sep/>``, and
        unwraps ``<token/>`` categorical special tokens found in the
        added vocabulary.
        """
        if added_vocab is None:
            a : Optional[int] = self.tokenizer.get_added_vocab()
        a : List[Any] = {}
        while tokens:
            # find the next opening tag <s_key>
            a : str = re.search(r"<s_(.*?)>" , __UpperCAmelCase , re.IGNORECASE)
            if start_token is None:
                break
            a : Optional[int] = start_token.group(1)
            a : Optional[Any] = re.search(rf'''</s_{key}>''' , __UpperCAmelCase , re.IGNORECASE)
            a : Any = start_token.group()
            if end_token is None:
                # unmatched opening tag: drop it and continue
                a : Optional[Any] = tokens.replace(__UpperCAmelCase , "")
            else:
                a : int = end_token.group()
                a : Tuple = re.escape(__UpperCAmelCase)
                a : str = re.escape(__UpperCAmelCase)
                a : Dict = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , __UpperCAmelCase , re.IGNORECASE)
                if content is not None:
                    a : Tuple = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node
                        a : Any = self.tokenajson(__UpperCAmelCase , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase)
                        if value:
                            if len(__UpperCAmelCase) == 1:
                                a : str = value[0]
                            a : Union[str, Any] = value
                    else:
                        # leaf nodes
                        a : int = []
                        for leaf in content.split(r"<sep/>"):
                            a : List[str] = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                a : Union[str, Any] = leaf[1:-2]  # for categorical special tokens
                            output[key].append(__UpperCAmelCase)
                        if len(output[key]) == 1:
                            a : Tuple = output[key][0]
            # advance past the consumed span
            a : List[Any] = tokens[tokens.find(__UpperCAmelCase) + len(__UpperCAmelCase) :].strip()
            if tokens[:6] == r"<sep/>":
                # non-leaf nodes
                return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase)
        if len(__UpperCAmelCase):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def __snake_case ( self : str):
        """Deprecated alias for image_processor_class."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,
            __UpperCAmelCase ,
        )
        return self.image_processor_class

    @property
    def __snake_case ( self : int):
        """Deprecated alias for image_processor."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,
            __UpperCAmelCase ,
        )
        return self.image_processor
40
"""Tests that the deprecated metric APIs (load_metric, list_metrics,
inspect_metric) emit a deprecation warning pointing to 🤗 Evaluate.

NOTE(review): fixture/function names and parameters look like renaming
artifacts — the fixtures use ``monkeypatch`` while the parameter is
``A_``; confirm against upstream before relying on the signatures.
"""
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def lowercase ( A_ )-> List[Any]:
    """Reset the one-shot deprecation-warning registry so the warning fires again."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )


@pytest.fixture
def lowercase ( A_ )-> Tuple:
    """Replace the huggingface_hub client with a mock listing a few canned metrics."""

    class _A :
        """Stand-in for a hub metric entry — only carries its id."""

        def __init__( self : str , __UpperCAmelCase : int):
            a : List[Any] = metric_id

    class _A :
        """Stand-in for the hub client exposing list_metrics()."""

        # canned metric entries returned by the mocked hub
        UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]

        def __snake_case ( self : List[str]):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )


@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any:
    """Each deprecated entry point must warn with a link to the evaluate docs."""
    if "tmp_path" in args:
        # substitute the literal placeholder with the real tmp_path fixture value
        a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ):
        func(*A_ )
40
1
"""Parquet dataset builder: reads parquet files into Arrow tables in
batches, optionally restricted to a column subset.

NOTE(review): local variable names look like renaming artifacts — values
are assigned to ``a`` but read back through descriptive names
(``files``, ``splits``, ``parquet_file`` ...); confirm against upstream.
"""
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


__lowercase = datasets.utils.logging.get_logger(__name__)


@dataclass
class _A ( datasets.BuilderConfig ):
    """BuilderConfig for Parquet: batch size, column subset, and features."""

    UpperCAmelCase : int = 1_0_0_0_0
    UpperCAmelCase : Optional[List[str]] = None
    UpperCAmelCase : Optional[datasets.Features] = None


class _A ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that streams record batches out of parquet files."""

    UpperCAmelCase : str = ParquetConfig

    def __snake_case ( self : Tuple):
        """Dataset info carrying the (possibly None) configured features."""
        return datasets.DatasetInfo(features=self.config.features)

    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        """Resolve data_files into split generators, inferring features
        from the first parquet schema when none were configured."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        a : str = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(__UpperCAmelCase , (str, list, tuple)):
            # unnamed files: everything goes into the train split
            a : Dict = data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(__UpperCAmelCase):
                    with open(__UpperCAmelCase , "rb") as f:
                        a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
                    break
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
        return splits

    def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
        """Cast a table to the configured features' Arrow schema, if any."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
        return pa_table

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
        """Yield (key, table) pairs per parquet record batch across all files."""
        a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # the column subset must match the declared features exactly
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
            with open(__UpperCAmelCase , "rb") as f:
                a : Tuple = pq.ParquetFile(__UpperCAmelCase)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        a : Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
                    raise
40
"""Conway's Game of Life: step a 0/1 grid one generation and render a
sequence of generations as grayscale PIL images (saved as an animated GIF).

NOTE(review): in generate_images the pixel value is assigned to ``a``
instead of ``pixels[x, y]`` — this looks like a renaming artifact that
dropped the pixel write; confirm against upstream.
"""
from __future__ import annotations

from PIL import Image

# Define glider example
__lowercase = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def lowercase ( A_ )-> list[list[int]]:
    """Return the next generation of the grid.

    Cells outside the grid are treated as dead (no wrap-around).
    """
    a : str = []
    for i in range(len(A_ ) ):
        a : str = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            a : Union[str, Any] = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(A_ ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(A_ ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(A_ ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            a : Tuple = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(A_ )
    return next_generation


def lowercase ( A_ , A_ )-> list[Image.Image]:
    """Render ``frames`` successive generations as grayscale images.

    Live cells render black (0), dead cells white (255).
    """
    a : List[str] = []
    for _ in range(A_ ):
        # Create output image
        a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) )
        a : Union[str, Any] = img.load()
        # Save cells to image
        for x in range(len(A_ ) ):
            for y in range(len(cells[0] ) ):
                a : Optional[Any] = 255 - cells[y][x] * 255
                a : str = (colour, colour, colour)
        # Save image
        images.append(A_ )
        a : Tuple = new_generation(A_ )
    return images


if __name__ == "__main__":
    __lowercase = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
1
"""Conversion script: download an original Audio Spectrogram Transformer
(AST) checkpoint, rename its weights to the 🤗 Transformers layout, verify
logits on a sample clip, and optionally save/push the converted model.

NOTE(review): function and local names look like renaming artifacts —
values are assigned to ``a``/``lowercase`` but read back through
descriptive names (``config``, ``model_name`` ...); confirm against upstream.
"""
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)


def lowercase ( A_ )-> Dict:
    """Build an ASTConfig matching the named checkpoint variant.

    The "NN-NN" infix encodes time/frequency strides; speech-commands
    checkpoints use a shorter 128-frame spectrogram and a 35-way head,
    AudioSet checkpoints a 527-way head.
    """
    a : str = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        a : Union[str, Any] = 128
    elif "12-12" in model_name:
        a : List[Any] = 12
        a : str = 12
    elif "14-14" in model_name:
        a : List[Any] = 14
        a : Optional[int] = 14
    elif "16-16" in model_name:
        a : Any = 16
        a : List[Any] = 16
    else:
        raise ValueError("Model not supported" )
    a : Optional[int] = "huggingface/label-files"
    if "speech-commands" in model_name:
        a : Optional[int] = 35
        a : List[str] = "speech-commands-v2-id2label.json"
    else:
        a : Optional[Any] = 527
        a : Tuple = "audioset-id2label.json"
    # attach the id<->label maps downloaded from the hub
    a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
    a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()}
    a : Any = idalabel
    a : str = {v: k for k, v in idalabel.items()}
    return config


def lowercase ( A_ )-> Tuple:
    """Map one original state-dict key to its 🤗 Transformers name."""
    if "module.v" in name:
        a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" )
    if "cls_token" in name:
        a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" )
    if "dist_token" in name:
        a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" )
    if "pos_embed" in name:
        a : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    # transformer blocks
    if "blocks" in name:
        a : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        a : str = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        a : Tuple = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        a : int = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        a : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
    # classifier head
    if "module.mlp_head.0" in name:
        a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
    if "module.mlp_head.1" in name:
        a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" )
    return name


def lowercase ( A_ , A_ )-> Any:
    """Rewrite the original state dict in place, splitting fused qkv
    projections into separate query/key/value tensors."""
    for key in orig_state_dict.copy().keys():
        a : str = orig_state_dict.pop(A_ )
        if "qkv" in key:
            a : int = key.split("." )
            a : Optional[int] = int(key_split[3] )
            a : int = config.hidden_size
            if "weight" in key:
                # fused (3*dim, dim) weight -> q / k / v slices
                a : List[str] = val[:dim, :]
                a : Any = val[dim : dim * 2, :]
                a : int = val[-dim:, :]
            else:
                # fused (3*dim,) bias -> q / k / v slices
                a : Optional[Any] = val[:dim]
                a : Union[str, Any] = val[dim : dim * 2]
                a : str = val[-dim:]
        else:
            a : str = val
    return orig_state_dict


def lowercase ( A_ )-> Dict:
    """Drop the original distillation/classification heads that have no
    counterpart in the converted model."""
    a : Union[str, Any] = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(A_ , A_ )


@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
    """Download, convert, numerically verify, and optionally save/push
    the named AST checkpoint."""
    a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
    # original checkpoint download URLs, keyed by variant name
    a : Dict = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    a : Any = model_name_to_url[model_name]
    a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
    # remove some keys
    remove_keys(A_ )
    # rename some keys
    a : Union[str, Any] = convert_state_dict(A_ , A_ )
    # load 🤗 model
    a : List[str] = ASTForAudioClassification(A_ )
    model.eval()
    model.load_state_dict(A_ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
    a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
    a : str = 1_024 if "speech-commands" not in model_name else 128
    a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
    if "speech-commands" in model_name:
        a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
        a : int = dataset[0]["audio"]["array"]
    else:
        a : Tuple = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" ,
            filename="sample_audio.flac" ,
            repo_type="dataset" ,
        )
        a , a : Tuple = torchaudio.load(A_ )
        a : Optional[Any] = waveform.squeeze().numpy()
    a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
    # forward pass
    a : Optional[Any] = model(**A_ )
    a : List[str] = outputs.logits
    # expected logits[0, :3] per checkpoint, from the original implementation
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
        raise ValueError("Logits don't match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(A_ ).mkdir(exist_ok=A_ )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A_ )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(A_ )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(F'''MIT/{model_name}''' )
        feature_extractor.push_to_hub(F'''MIT/{model_name}''' )


if __name__ == "__main__":
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""ast-finetuned-audioset-10-10-0.4593""",
        type=str,
        help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )

    __lowercase = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
1
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase = 16 __lowercase = 32 def lowercase ( A_ , A_ = 16 , A_ = "bert-base-cased" )-> Dict: '''simple docstring''' a : str = AutoTokenizer.from_pretrained(A_ ) a : int = load_dataset("glue" , "mrpc" ) def tokenize_function(A_ ): # max_length=None => use the model max length (it's actually the default) a : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A_ , max_length=A_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset a : Any = datasets.map( A_ , batched=A_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A_ , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(A_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
a : Dict = DataLoader( tokenized_datasets["train"] , shuffle=A_ , collate_fn=A_ , batch_size=A_ ) a : str = DataLoader( tokenized_datasets["validation"] , shuffle=A_ , collate_fn=A_ , batch_size=A_ ) return train_dataloader, eval_dataloader def lowercase ( A_ , A_ , A_ , A_ )-> Tuple: '''simple docstring''' model.eval() a : str = 0 for step, batch in enumerate(A_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): a : Optional[Any] = model(**A_ ) a : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times a , a : Optional[int] = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A_ ) - 1: a : str = predictions[: len(eval_dataloader.dataset ) - samples_seen] a : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A_ , references=A_ , ) a : Tuple = metric.compute() return eval_metric["accuracy"] def lowercase ( A_ , A_ )-> Any: '''simple docstring''' a : int = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a : Tuple = config["lr"] a : Any = int(config["num_epochs"] ) a : Optional[int] = int(config["seed"] ) a : str = int(config["batch_size"] ) a : Tuple = args.model_name_or_path set_seed(A_ ) a , a : Tuple = get_dataloaders(A_ , A_ , A_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(A_ , return_dict=A_ ) # Instantiate optimizer a : Optional[int] = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) a : Optional[int] = optimizer_cls(params=model.parameters() , 
lr=A_ ) if accelerator.state.deepspeed_plugin is not None: a : str = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: a : str = 1 a : Union[str, Any] = (len(A_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): a : List[Any] = get_linear_schedule_with_warmup( optimizer=A_ , num_warmup_steps=0 , num_training_steps=A_ , ) else: a : Union[str, Any] = DummyScheduler(A_ , total_num_steps=A_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a : List[Any] = accelerator.prepare( A_ , A_ , A_ , A_ , A_ ) # We need to keep track of how many total steps we have iterated over a : Dict = 0 # We also need to keep track of the stating epoch so files are named properly a : Any = 0 a : Tuple = evaluate.load("glue" , "mrpc" ) a : Dict = num_epochs if args.partial_train_epoch is not None: a : Optional[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) a : Tuple = args.resume_from_checkpoint.split("epoch_" )[1] a : Dict = "" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break a : List[str] = int(A_ ) + 1 a : List[Any] = evaluation_loop(A_ , A_ , A_ , A_ ) accelerator.print("resumed checkpoint performance:" , A_ ) accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] ) accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] ) with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , "r" ) as f: a : Optional[int] = json.load(A_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), 
"Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model a : int = {} for epoch in range(A_ , A_ ): model.train() for step, batch in enumerate(A_ ): a : List[Any] = model(**A_ ) a : int = outputs.loss a : Union[str, Any] = loss / gradient_accumulation_steps accelerator.backward(A_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 a : Tuple = F'''epoch_{epoch}''' a : Dict = os.path.join(args.output_dir , A_ ) accelerator.save_state(A_ ) a : List[Any] = evaluation_loop(A_ , A_ , A_ , A_ ) a : Union[str, Any] = accuracy a : Optional[Any] = lr_scheduler.get_lr()[0] a : List[Any] = optimizer.param_groups[0]["lr"] a : Optional[Any] = epoch a : Dict = overall_step accelerator.print(F'''epoch {epoch}:''' , A_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , "w" ) as f: json.dump(A_ , A_ ) def lowercase ( )-> Optional[int]: '''simple docstring''' a : List[str] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=A_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A_ , ) parser.add_argument( "--output_dir" , type=A_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--resume_from_checkpoint" , type=A_ , default=A_ , help="If the training should continue from a checkpoint folder." 
, ) parser.add_argument( "--partial_train_epoch" , type=A_ , default=A_ , help="If passed, the training will stop after this number of epochs." , ) parser.add_argument( "--num_epochs" , type=A_ , default=2 , help="Number of train epochs." , ) a : Union[str, Any] = parser.parse_args() a : Union[str, Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(A_ , A_ ) if __name__ == "__main__": main()
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
1
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def lowercase ( )-> List[str]: '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(A_ ): requests.request("GET" , "https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" , "https://huggingface.co" , timeout=1.0 ) @pytest.mark.integration def lowercase ( )-> Any: '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" , "https://huggingface.co" ) def lowercase ( )-> List[Any]: '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(A_ ): http_head("https://huggingface.co" )
40
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() 
model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, 
-8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
1
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" @property def __snake_case ( self : Tuple): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __snake_case ( self : List[str]): a : Optional[int] = ort.SessionOptions() a : str = False return options def __snake_case ( self : Optional[int]): a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy") # using the PNDM scheduler by default a : List[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : int = "A red cat sitting on a park bench" a : Optional[Any] = np.random.RandomState(0) a : Optional[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__UpperCAmelCase , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - 
image).max() < 1e-2
40
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_rag""": ["""RagConfig"""], """retrieval_rag""": ["""RagRetriever"""], """tokenization_rag""": ["""RagTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """RagModel""", """RagPreTrainedModel""", """RagSequenceForGeneration""", """RagTokenForGeneration""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TFRagModel""", """TFRagPreTrainedModel""", """TFRagSequenceForGeneration""", """TFRagTokenForGeneration""", ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" import math def lowercase ( A_ , A_ = 0 , A_ = 0 )-> list: '''simple docstring''' a : Optional[Any] = end or len(A_ ) for i in range(A_ , A_ ): a : Optional[Any] = i a : Dict = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: a : str = array[temp_index - 1] temp_index -= 1 a : Optional[Any] = temp_index_value return array def lowercase ( A_ , A_ , A_ )-> None: # Max Heap '''simple docstring''' a : Union[str, Any] = index a : str = 2 * index + 1 # Left Node a : Optional[int] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: a : List[Any] = left_index if right_index < heap_size and array[largest] < array[right_index]: a : Optional[int] = right_index if largest != index: a , a : List[Any] = array[largest], array[index] heapify(A_ , A_ , A_ ) def lowercase ( A_ )-> list: '''simple docstring''' a : Optional[int] = len(A_ ) for i in range(n // 2 , -1 , -1 ): heapify(A_ , A_ , A_ ) for i in range(n - 1 , 0 , -1 ): a , a : Any = array[0], array[i] heapify(A_ , 0 , A_ ) return array def lowercase ( A_ , A_ , A_ , A_ )-> int: '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def lowercase ( A_ , A_ , A_ , A_ )-> int: '''simple docstring''' a : str = low a : int = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i a , a : int = array[j], array[i] i += 1 def lowercase ( A_ )-> list: '''simple docstring''' if len(A_ ) == 0: return array a : Union[str, Any] = 2 * math.ceil(math.loga(len(A_ ) ) ) a : Optional[int] = 16 return intro_sort(A_ , 0 , len(A_ ) , A_ , A_ ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> list: '''simple docstring''' while end - start > size_threshold: if max_depth 
== 0: return heap_sort(A_ ) max_depth -= 1 a : Tuple = median_of_a(A_ , A_ , start + ((end - start) // 2) + 1 , end - 1 ) a : Optional[Any] = partition(A_ , A_ , A_ , A_ ) intro_sort(A_ , A_ , A_ , A_ , A_ ) a : Dict = p return insertion_sort(A_ , A_ , A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowercase = input("""Enter numbers separated by a comma : """).strip() __lowercase = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
40
"""simple docstring"""
# NOTE(review): this module appears machine-mangled: classes/bases were renamed
# to placeholders (`_A`, `_a`), every method to `__snake_case` (later defs
# shadow earlier ones), and several identifiers/literals were replaced by
# `__UpperCAmelCase`, which is undefined in most scopes below.  The typing
# names used in annotations (Optional, Union, ...) are also not imported.
# Comments describe intent only; the code is left token-identical.
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

# Make kernels deterministic so the hard-coded expected slices are reproducible.
enable_full_determinism()


class _A ( _a ,_a ,_a ,unittest.TestCase ):
    """simple docstring"""
    # Fast (tiny-model) test suite for StableDiffusionInpaintPipeline.
    UpperCAmelCase : str = StableDiffusionInpaintPipeline
    UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase : Union[str, Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase : int = frozenset([] )

    def __snake_case ( self : Dict):
        # Builds a tiny UNet / VAE / CLIP stack so the pipeline runs in seconds.
        torch.manual_seed(0)
        a : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
        torch.manual_seed(0)
        a : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        a : Any = CLIPTextModel(__UpperCAmelCase)
        a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # NOTE(review): the dict values (unet, scheduler, ...) were presumably
        # the original binding names that the mangler rewrote to `a` above.
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
        a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
        a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
        # MPS has no torch.Generator(device=...) support, hence the branch.
        if str(__UpperCAmelCase).startswith("mps"):
            a : Tuple = torch.manual_seed(__UpperCAmelCase)
        else:
            a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def __snake_case ( self : List[str]):
        # Two-step CPU inference; compares a 3x3 corner slice to golden values.
        a : Union[str, Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a : Tuple = self.get_dummy_components()
        a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
        a : int = sd_pipe.to(__UpperCAmelCase)
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Any = self.get_dummy_inputs(__UpperCAmelCase)
        a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __snake_case ( self : str):
        # Delegates to the common mixin's batch-vs-single consistency check.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """simple docstring"""
    # Slow GPU integration tests against the real SD-2 inpainting checkpoint.

    def __snake_case ( self : Union[str, Any]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Dict):
        # Full-precision end-to-end inpainting vs a golden .npy image.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        a : Tuple = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : str = torch.manual_seed(0)
        a : Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def __snake_case ( self : Any):
        # Same scenario in fp16; looser tolerance against the fp16 golden image.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Any = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Dict = torch.manual_seed(0)
        a : List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def __snake_case ( self : int):
        # Verifies sequential CPU offload keeps peak VRAM under ~2.65 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler")
        a : int = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Optional[int] = torch.manual_seed(0)
        a : str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
40
1
"""simple docstring"""
# NOTE(review): machine-mangled module.  Both top-level helpers were renamed
# to `lowercase` (with duplicate parameter names `A_ , A_`, a SyntaxError),
# although the decorator call sites below still use the original name
# `run_with_tf_optimizations`; method bodies read names (`func`, `do_eager_mode`,
# `strategy`, `config`, `model`, ...) that the mangler removed from the
# signatures.  Comments describe intent; the code is left token-identical.
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)

if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml

__lowercase = logging.get_logger(__name__)


def lowercase ( A_ , A_ )-> Optional[Any]:
    '''simple docstring'''
    # Decorator factory: wraps a callable so it runs either eagerly or as a
    # (optionally XLA-compiled) tf.function, depending on the benchmark args.
    def run_func(A_ ):
        @wraps(A_ )
        def run_in_eager_mode(*A_ , **A_ ):
            return func(*A_ , **A_ )

        @wraps(A_ )
        @tf.function(experimental_compile=A_ )
        def run_in_graph_mode(*A_ , **A_ ):
            return func(*A_ , **A_ )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def lowercase ( A_ , A_ , A_ )-> ["tf.Tensor"]:
    '''simple docstring'''
    # Builds a (batch_size, sequence_length) tensor of random token ids.
    a : Dict = random.Random()
    a : Optional[int] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(A_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )


class _A ( _a ):
    """simple docstring"""
    # TensorFlow implementation of the Benchmark base class: measures
    # inference/training speed and memory for a given model config.
    UpperCAmelCase : TensorFlowBenchmarkArguments
    UpperCAmelCase : PretrainedConfig
    UpperCAmelCase : str = "TensorFlow"

    @property
    def __snake_case ( self : Any):
        # Framework version string reported in the benchmark output.
        return tf.__version__

    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # initialize GPU on separate process
        a : Union[str, Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        a : Tuple = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        return self._measure_speed(_inference)

    def __snake_case ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Training speed: same flow as inference speed but with the train func.
        a : int = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        a : Any = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        return self._measure_speed(_train)

    def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase)
        a : List[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        a : Any = self._prepare_inference_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        return self._measure_memory(_inference)

    def __snake_case ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Training memory: enable memory growth on GPU, then measure.
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __UpperCAmelCase)
        a : List[str] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        a : Optional[int] = self._prepare_train_func(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        return self._measure_memory(_train)

    def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Builds a zero-arg forward-pass closure for the requested model.
        a : List[Any] = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported.")
        a : int = (
            hasattr(__UpperCAmelCase , "architectures")
            and isinstance(config.architectures , __UpperCAmelCase)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                a : Union[str, Any] = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                a : int = __import__("transformers" , fromlist=[model_class])
                a : int = getattr(__UpperCAmelCase , __UpperCAmelCase)
                a : List[Any] = model_cls(__UpperCAmelCase)
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            a : str = TF_MODEL_MAPPING[config.__class__](__UpperCAmelCase)
        # encoder-decoder has vocab size saved differently
        a : Dict = config.vocab_size if hasattr(__UpperCAmelCase , "vocab_size") else config.encoder.vocab_size
        a : List[str] = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_forward():
            return model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , training=__UpperCAmelCase)

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_forward():
            return model(__UpperCAmelCase , training=__UpperCAmelCase)

        a : str = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Builds a zero-arg train-step closure (forward + tf.gradients).
        a : str = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported.")
        a : Dict = (
            hasattr(__UpperCAmelCase , "architectures")
            and isinstance(config.architectures , __UpperCAmelCase)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                a : Union[str, Any] = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                a : Optional[Any] = __import__("transformers" , fromlist=[model_class])
                a : List[str] = getattr(__UpperCAmelCase , __UpperCAmelCase)
                a : List[Any] = model_cls(__UpperCAmelCase)
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            a : Optional[int] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__UpperCAmelCase)
        # encoder-decoder has vocab size saved differently
        a : int = config.vocab_size if hasattr(__UpperCAmelCase , "vocab_size") else config.encoder.vocab_size
        a : Union[str, Any] = random_input_ids(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_train():
            a : str = model(__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)[0]
            a : Any = tf.gradients(__UpperCAmelCase , model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_train():
            a : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)[0]
            a : int = tf.gradients(__UpperCAmelCase , model.trainable_variables)
            return gradients

        a : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Any):
        # Times the closure under the device strategy; returns seconds/run.
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(__UpperCAmelCase , repeat=1 , number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                a : List[Any] = timeit.repeat(
                    __UpperCAmelCase , repeat=self.args.repeat , number=10 , )
                return min(__UpperCAmelCase) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''')

    def __snake_case ( self : Dict , __UpperCAmelCase : Callable[[], None]):
        # Measures peak memory for the closure: nvml on GPU, psutil-based
        # peak RSS on CPU, optional line-by-line tracing in eager mode.
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    a : List[Any] = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        a : Any = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        a : Any = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        a : Tuple = nvml.nvmlDeviceGetMemoryInfo(__UpperCAmelCase)
                        a : Tuple = meminfo.used
                        a : List[Any] = Memory(__UpperCAmelCase)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        a : Tuple = None
                    else:
                        a : int = measure_peak_memory_cpu(__UpperCAmelCase)
                        a : List[Any] = Memory(__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    a : Dict = stop_memory_tracing(__UpperCAmelCase)
                    if memory is None:
                        a : str = summary.total
                else:
                    a : int = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''')
                return "N/A", None
40
"""simple docstring"""


def lowercase(A_) -> bool:
    """Return True if ``A_`` is an isogram, i.e. no letter occurs twice.

    The comparison is case-insensitive; an empty string counts as an isogram.

    Raises:
        ValueError: if ``A_`` contains any non-alphabetic character.
    """
    # Fix: the original body iterated an undefined name ``string`` instead of
    # the parameter, so every call raised NameError.
    if not all(ch.isalpha() for ch in A_):
        raise ValueError("String must only contain alphabetic characters.")
    lowered = A_.lower()
    # A string is an isogram exactly when de-duplication removes nothing; the
    # original's sort was irrelevant to this length comparison.
    return len(lowered) == len(set(lowered))


if __name__ == "__main__":
    # Fix: the original guard assigned both results to one throwaway name and
    # then referenced the never-defined ``is_isogram``/``input_str``.
    input_str = input("""Enter a string """).strip()
    isogram = lowercase(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
40
1
"""simple docstring"""
from __future__ import annotations


class _A:
    """XOR stream cipher.

    Each character is XOR-ed with an integer key.  A key of 0 (the default)
    means "use the key given at construction time, falling back to 1".  The
    public method names are restored from the usage examples in the
    commented-out test section at the bottom of this file (``encrypt``,
    ``decrypt``, ``encrypt_string``, ``decrypt_string``, ``encrypt_file``,
    ``decrypt_file``); the mangled original gave every method the same name,
    declared duplicate parameter names (a SyntaxError), and never stored the
    construction key on ``self``.
    """

    def __init__(self, key: int = 0):
        """Remember the default key used when a method is called with key 0."""
        # Fix: the original bound the key to a throwaway local, leaving
        # ``self.__key`` (read by every method below) unset.
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR-encrypt ``content`` character by character; returns a char list."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt` (XOR with the same key is an involution)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt ``content`` and return the result as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt ``file`` line by line into ``encrypt.out``.

        Returns False (instead of raising) on any I/O error.
        """
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt ``file`` line by line into ``decrypt.out``.

        Returns False (instead of raising) on any I/O error.
        """
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Public alias matching the name used in the usage examples below.
XORCipher = _A


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#       print("encrypt successful")
# else:
#       print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#       print("decrypt successful")
# else:
#       print("decrypt unsuccessful")
40
"""simple docstring"""
# NOTE(review): machine-mangled module — both classes were renamed to ``_A``
# (so ``ParquetConfig`` below is an undefined forward reference to the first
# class), methods to ``__snake_case``, and many locals to ``a`` while later
# lines still read the original names (``data_files``, ``splits``,
# ``pa_table``, ...).  Comments describe intent; the code is unchanged.
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast

__lowercase = datasets.utils.logging.get_logger(__name__)


@dataclass
class _A ( datasets.BuilderConfig ):
    """simple docstring"""
    # BuilderConfig for Parquet: rows per yielded batch, optional column
    # subset, optional explicit feature schema.
    UpperCAmelCase : int = 1_0_0_0_0
    UpperCAmelCase : Optional[List[str]] = None
    UpperCAmelCase : Optional[datasets.Features] = None


class _A ( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    # Arrow-based builder that streams record batches out of parquet files.
    UpperCAmelCase : str = ParquetConfig

    def __snake_case ( self : Tuple):
        # Dataset metadata; features come from the config when provided.
        return datasets.DatasetInfo(features=self.config.features)

    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        # Build split generators from config.data_files; when no feature
        # schema was given, infer it from the first parquet file's schema.
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        a : str = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(__UpperCAmelCase , (str, list, tuple)):
            a : Dict = data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features is they are stoed in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(__UpperCAmelCase):
                    with open(__UpperCAmelCase , "rb") as f:
                        a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
                    break
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
        return splits

    def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
        return pa_table

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
        # Yield ("<file_idx>_<batch_idx>", table) pairs, casting each batch
        # to the declared schema; column selection must match the features.
        a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
            with open(__UpperCAmelCase , "rb") as f:
                a : Tuple = pq.ParquetFile(__UpperCAmelCase)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        a : Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
                    raise
40
1
"""simple docstring"""
# Fix: the mangled original renamed all three functions to ``lowercase`` (each
# def shadowing the previous) while the call sites below still referenced
# ``floyd``/``reverse_floyd``/``pretty_print``, and the bodies referenced the
# original parameter name ``n``; the main loop likewise referenced the
# never-assigned ``K`` and ``user_number``.  The grounded names are restored.


# Function to print upper half of diamond (pyramid)
def floyd(n) -> None:
    """Print the upper half of a star diamond of height ``n``."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n) -> None:
    """Print the lower half of a star diamond of height ``n``."""
    for i in range(n, 0, -1):
        # Row k (counting down) carries k stars — presumably the inner bound
        # was ``i`` before mangling; ``n`` would print a rectangle, not a
        # pyramid.
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n) -> None:
    """Print a full star diamond of height ``n`` (or a notice for n <= 0)."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"""| /\ | |- | |- |--| |\ /| |-""")
    print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
40
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowercase = logging.get_logger(__name__)

__lowercase = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}


class _A ( PretrainedConfig ):
    """Configuration for DPR encoder/reader models (BERT-style backbone).

    Fix: the mangled original declared every ``__init__`` parameter with the
    same placeholder name (a SyntaxError), bound each value to a throwaway
    local instead of ``self``, and inherited from an undefined ``_a``.  The
    parameter names are grounded by the right-hand sides of the original
    assignments, the defaults by their original positions, and the base class
    by the otherwise-unused ``PretrainedConfig`` import above.
    """

    UpperCAmelCase : int = """dpr"""  # model_type identifier

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim = 0 means "no extra projection on top of the pooler".
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
40
1
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal


def lowercase() -> None:
    """Regression test: Kruskal's MST on a fixed 9-vertex weighted graph.

    Asserts that ``kruskal`` returns exactly the known minimum spanning tree;
    edge ordering is irrelevant, hence the sorted comparison.

    Fixes over the mangled original: the return annotation referenced ``Any``
    without importing it (NameError at import time), and ``kruskal`` was
    called with a nonexistent name while the constructed graph went unused.
    """
    num_nodes = 9
    # Each entry is [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
40
"""simple docstring"""


class _A :
    """simple docstring"""
    # NOTE(review): this block appears machine-mangled: every method is named
    # ``__snake_case`` (later defs shadow earlier ones, including both static
    # methods), parameters were renamed to ``__UpperCAmelCase`` while the
    # bodies still read the original names (``size``, ``index``, ``value``,
    # ``left``, ``right``), and assignments were redirected to a throwaway
    # local ``a`` instead of ``self`` attributes / loop variables.  The shape
    # of the code — get_next = index | (index + 1),
    # get_prev = (index & (index + 1)) - 1, parallel ``tree``/``arr`` arrays —
    # looks like a max-Fenwick tree with point update and range-max query over
    # [left, right) — TODO confirm against the original source; the three-way
    # ``max(...)`` in the update path cannot be reconstructed unambiguously.
    # Code is left token-identical; only comments were added.

    def __init__( self : int , __UpperCAmelCase : int):
        a : Tuple = size                 # presumably self.size = size
        a : Dict = [0] * size            # presumably self.tree = [0] * size
        a : Optional[int] = [0] * size   # presumably self.arr = [0] * size

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # Fenwick "next": smallest index whose range covers this one.
        return index | (index + 1)

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # Fenwick "prev": one before the start of this index's covered range.
        return (index & (index + 1)) - 1

    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Point update: store the value, then propagate maxima up the tree.
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                a : Optional[int] = value
            else:
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Range-max query; walks right-to-left taking whole tree nodes when
        # they fit inside [left, right) and single array cells otherwise.
        right -= 1  # Because of right is exclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
1
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class _A ( _a ): """simple docstring""" UpperCAmelCase : BigBirdConfig UpperCAmelCase : jnp.dtype = jnp.floataa UpperCAmelCase : bool = True def __snake_case ( self : List[str]): super().setup() a : Optional[Any] = nn.Dense(5 , dtype=self.dtype) def __call__( self : List[Any] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any]): a : int = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase) a : int = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class _A ( _a ): """simple docstring""" UpperCAmelCase : Union[str, Any] = FlaxBigBirdForNaturalQuestionsModule def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' def cross_entropy(A_ , A_ , A_=None ): a : List[str] = logits.shape[-1] a : Dict = (labels[..., None] == jnp.arange(A_ )[None]).astype("f4" ) a : Optional[int] = jax.nn.log_softmax(A_ , axis=-1 ) a : List[str] = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: a : List[Any] = reduction(A_ ) return loss a : int = partial(A_ , reduction=jnp.mean ) a : int = cross_entropy(A_ , A_ ) a : Optional[Any] = cross_entropy(A_ , A_ ) a : List[str] = cross_entropy(A_ , A_ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class _A : """simple docstring""" UpperCAmelCase : str = "google/bigbird-roberta-base" UpperCAmelCase : int = 3_0_0_0 UpperCAmelCase : int = 1_0_5_0_0 
UpperCAmelCase : int = 1_2_8 UpperCAmelCase : int = 3 UpperCAmelCase : int = 1 UpperCAmelCase : int = 5 # tx_args UpperCAmelCase : float = 3E-5 UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 2_0_0_0_0 UpperCAmelCase : float = 0.0095 UpperCAmelCase : str = "bigbird-roberta-natural-questions" UpperCAmelCase : str = "training-expt" UpperCAmelCase : str = "data/nq-training.jsonl" UpperCAmelCase : str = "data/nq-validation.jsonl" def __snake_case ( self : Tuple): os.makedirs(self.base_dir , exist_ok=__UpperCAmelCase) a : Tuple = os.path.join(self.base_dir , self.save_dir) a : Any = self.batch_size_per_device * jax.device_count() @dataclass class _A : """simple docstring""" UpperCAmelCase : int UpperCAmelCase : int = 4_0_9_6 # no dynamic padding on TPUs def __call__( self : Tuple , __UpperCAmelCase : Union[str, Any]): a : Optional[int] = self.collate_fn(__UpperCAmelCase) a : Union[str, Any] = jax.tree_util.tree_map(__UpperCAmelCase , __UpperCAmelCase) return batch def __snake_case ( self : Optional[int] , __UpperCAmelCase : Tuple): a , a : Union[str, Any] = self.fetch_inputs(features["input_ids"]) a : Dict = { "input_ids": jnp.array(__UpperCAmelCase , dtype=jnp.intaa), "attention_mask": jnp.array(__UpperCAmelCase , dtype=jnp.intaa), "start_labels": jnp.array(features["start_token"] , dtype=jnp.intaa), "end_labels": jnp.array(features["end_token"] , dtype=jnp.intaa), "pooled_labels": jnp.array(features["category"] , dtype=jnp.intaa), } return batch def __snake_case ( self : List[str] , __UpperCAmelCase : list): a : Optional[int] = [self._fetch_inputs(__UpperCAmelCase) for ids in input_ids] return zip(*__UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : list): a : List[Any] = [1 for _ in range(len(__UpperCAmelCase))] while len(__UpperCAmelCase) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def lowercase ( A_ , A_ , A_=None )-> Tuple: '''simple docstring''' if seed is not None: a : Any 
= dataset.shuffle(seed=A_ ) for i in range(len(A_ ) // batch_size ): a : Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size] yield dict(A_ ) @partial(jax.pmap , axis_name="batch" ) def lowercase ( A_ , A_ , **A_ )-> List[str]: '''simple docstring''' def loss_fn(A_ ): a : Tuple = model_inputs.pop("start_labels" ) a : List[str] = model_inputs.pop("end_labels" ) a : Union[str, Any] = model_inputs.pop("pooled_labels" ) a : Tuple = state.apply_fn(**A_ , params=A_ , dropout_rng=A_ , train=A_ ) a , a , a : List[Any] = outputs return state.loss_fn( A_ , A_ , A_ , A_ , A_ , A_ , ) a , a : List[str] = jax.random.split(A_ ) a : List[str] = jax.value_and_grad(A_ ) a , a : Dict = grad_fn(state.params ) a : Tuple = jax.lax.pmean({"loss": loss} , axis_name="batch" ) a : Optional[int] = jax.lax.pmean(A_ , "batch" ) a : List[Any] = state.apply_gradients(grads=A_ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="batch" ) def lowercase ( A_ , **A_ )-> str: '''simple docstring''' a : Dict = model_inputs.pop("start_labels" ) a : str = model_inputs.pop("end_labels" ) a : Any = model_inputs.pop("pooled_labels" ) a : Any = state.apply_fn(**A_ , params=state.params , train=A_ ) a , a , a : str = outputs a : Dict = state.loss_fn(A_ , A_ , A_ , A_ , A_ , A_ ) a : List[str] = jax.lax.pmean({"loss": loss} , axis_name="batch" ) return metrics class _A ( train_state.TrainState ): """simple docstring""" UpperCAmelCase : Callable = struct.field(pytree_node=_a ) @dataclass class _A : """simple docstring""" UpperCAmelCase : Args UpperCAmelCase : Callable UpperCAmelCase : Callable UpperCAmelCase : Callable UpperCAmelCase : Callable UpperCAmelCase : wandb UpperCAmelCase : Callable = None def __snake_case ( self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int]=None): a : Optional[int] = model.params a : str = TrainState.create( apply_fn=model.__call__ , params=__UpperCAmelCase , 
tx=__UpperCAmelCase , loss_fn=__UpperCAmelCase , ) if ckpt_dir is not None: a , a , a , a , a : Optional[Any] = restore_checkpoint(__UpperCAmelCase , __UpperCAmelCase) a : Union[str, Any] = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } a , a : List[Any] = build_tx(**__UpperCAmelCase) a : str = train_state.TrainState( step=__UpperCAmelCase , apply_fn=model.__call__ , params=__UpperCAmelCase , tx=__UpperCAmelCase , opt_state=__UpperCAmelCase , ) a : Union[str, Any] = args a : Tuple = data_collator a : Optional[Any] = lr a : List[str] = params a : Tuple = jax_utils.replicate(__UpperCAmelCase) return state def __snake_case ( self : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any]): a : Optional[int] = self.args a : int = len(__UpperCAmelCase) // args.batch_size a : str = jax.random.PRNGKey(0) a : Optional[int] = jax.random.split(__UpperCAmelCase , jax.device_count()) for epoch in range(args.max_epochs): a : List[str] = jnp.array(0 , dtype=jnp.floataa) a : List[Any] = get_batched_dataset(__UpperCAmelCase , args.batch_size , seed=__UpperCAmelCase) a : Union[str, Any] = 0 for batch in tqdm(__UpperCAmelCase , total=__UpperCAmelCase , desc=f'''Running EPOCH-{epoch}'''): a : List[Any] = self.data_collator(__UpperCAmelCase) a , a , a : Any = self.train_step_fn(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: a : Optional[int] = jax_utils.unreplicate(state.step) a : str = running_loss.item() / i a : Tuple = self.scheduler_fn(state_step - 1) a : int = self.evaluate(__UpperCAmelCase , __UpperCAmelCase) a : int = { "step": state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(__UpperCAmelCase)) self.logger.log(__UpperCAmelCase , commit=__UpperCAmelCase) if i % 
args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__UpperCAmelCase) def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str]): a : List[str] = get_batched_dataset(__UpperCAmelCase , self.args.batch_size) a : Optional[Any] = len(__UpperCAmelCase) // self.args.batch_size a : Dict = jnp.array(0 , dtype=jnp.floataa) a : Any = 0 for batch in tqdm(__UpperCAmelCase , total=__UpperCAmelCase , desc="Evaluating ... "): a : Tuple = self.data_collator(__UpperCAmelCase) a : List[str] = self.val_step_fn(__UpperCAmelCase , **__UpperCAmelCase) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def __snake_case ( self : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict): a : List[str] = jax_utils.unreplicate(__UpperCAmelCase) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=" ... ") self.model_save_fn(__UpperCAmelCase , params=state.params) with open(os.path.join(__UpperCAmelCase , "opt_state.msgpack") , "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args , os.path.join(__UpperCAmelCase , "args.joblib")) joblib.dump(self.data_collator , os.path.join(__UpperCAmelCase , "data_collator.joblib")) with open(os.path.join(__UpperCAmelCase , "training_state.json") , "w") as f: json.dump({"step": state.step.item()} , __UpperCAmelCase) print("DONE") def lowercase ( A_ , A_ )-> Tuple: '''simple docstring''' print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=" ... 
" ) with open(os.path.join(A_ , "flax_model.msgpack" ) , "rb" ) as f: a : Union[str, Any] = from_bytes(state.params , f.read() ) with open(os.path.join(A_ , "opt_state.msgpack" ) , "rb" ) as f: a : List[str] = from_bytes(state.opt_state , f.read() ) a : Any = joblib.load(os.path.join(A_ , "args.joblib" ) ) a : Tuple = joblib.load(os.path.join(A_ , "data_collator.joblib" ) ) with open(os.path.join(A_ , "training_state.json" ) , "r" ) as f: a : List[Any] = json.load(A_ ) a : Dict = training_state["step"] print("DONE" ) return params, opt_state, step, args, data_collator def lowercase ( A_ , A_ , A_ , A_ )-> List[str]: '''simple docstring''' a : Optional[Any] = num_train_steps - warmup_steps a : List[str] = optax.linear_schedule(init_value=A_ , end_value=A_ , transition_steps=A_ ) a : str = optax.linear_schedule(init_value=A_ , end_value=1e-7 , transition_steps=A_ ) a : Union[str, Any] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowercase ( A_ , A_ , A_ , A_ , A_ )-> List[Any]: '''simple docstring''' def weight_decay_mask(A_ ): a : Tuple = traverse_util.flatten_dict(A_ ) a : Any = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(A_ ) a : Dict = scheduler_fn(A_ , A_ , A_ , A_ ) a : str = optax.adamw(learning_rate=A_ , weight_decay=A_ , mask=A_ ) return tx, lr
40
"""Unit tests for the 0/1 knapsack implementation."""
import unittest

from knapsack import knapsack as k


class _A(unittest.TestCase):
    """Tests for knapsack.knapsack(capacity, weights, values, counter).

    NOTE(review): the obfuscated dump lost the argument names; the
    (cap, w, val, c) call order below matches the expected results
    (e.g. weights [3, 2, 1] with values [1, 2, 3] and cap 3 -> 5) — confirm
    against the knapsack module's signature.
    """

    def test_base_case(self):
        """Zero capacity yields zero profit regardless of item values."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """cap 3, items (w=3,v=1), (w=2,v=2), (w=1,v=3) -> take w=2 and w=1 for 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic textbook instance: best is 100 + 120 = 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
40
1
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' a : Optional[Any] = 0 # if input_string is "aba" than new_input_string become "a|b|a" a : int = "" a : Any = "" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(A_ ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring a , a : int = 0, 0 # length[i] shows the length of palindromic substring with center i a : Optional[Any] = [1 for i in range(len(A_ ) )] # for each character in new_string find corresponding palindromic string a : Optional[Any] = 0 for j in range(len(A_ ) ): a : Any = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(A_ ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 a : Optional[int] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: a : List[str] = j - k + 1 # noqa: E741 a : Union[str, Any] = j + k - 1 # update max_length and start position if max_length < length[j]: a : Optional[int] = length[j] a : List[str] = j # create that string a : Dict = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
40
"""Tests for the LayoutLM slow/fast tokenizers (WordPiece over a toy vocab)."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class _A(TokenizerTesterMixin, unittest.TestCase):
    # Attribute names below are the contract TokenizerTesterMixin reads;
    # the obfuscated dump had lost them (every attribute was `UpperCAmelCase`).
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Placeholder kept from upstream; behaviour is covered by the common mixin tests."""
        pass
40
1
"""Fast tokenizer for PEGASUS (obfuscated names restored to match their call sites)."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class _A(PreTrainedTokenizerFast):
    """HF "fast" tokenizer wrapping the PEGASUS SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        # `offset` reserves the low token ids for the special/<unk_x> tokens
        # that precede the regular SentencePiece vocabulary.
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask marking special tokens in `seq` (<unk> counts as ordinary)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_a: List, token_ids_a_a: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Special-token mask; the trailing 1 accounts for the EOS appended at build time."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_a_a is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a_a) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Append EOS; pair inputs are simply concatenated."""
        if token_ids_a_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a_a + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
40
"""Decimal-to-binary string conversion (mirrors bin(), implemented manually)."""


def lowercase(num: int) -> str:
    """Return the binary representation of an integer, e.g. 5 -> "0b101".

    Raises:
        TypeError: if `num` is a float or a str (the dump had broken
            `isinstance(num, num)` checks here).

    >>> lowercase(0)
    '0b0'
    >>> lowercase(-37)
    '-0b100101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = num < 0
    num = abs(num)

    # Collect least-significant bits first, then reverse once
    # (avoids the original's O(n^2) insert(0, ...) pattern).
    bits: list[str] = []
    while num > 0:
        bits.append(str(num % 2))
        num >>= 1

    return ("-0b" if negative else "0b") + "".join(reversed(bits))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
1
"""Project Euler 31: count the ways to make a total using UK coins (1..200 pence)."""


def one_pence() -> int:
    """A single 1p coin: exactly one way to finish any non-negative remainder."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make `x` pence using coins of at most 100p."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make `x` pence using any coin up to 200p."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(pence: int = 200) -> int:
    """Number of coin combinations totalling `pence`.

    >>> solution(200)
    73682
    """
    return two_pound(pence)


if __name__ == "__main__":
    print(solution(int(input().strip())))
40
"""Fractional knapsack via value-density sort + prefix-weight bisection."""
from bisect import bisect
from itertools import accumulate


def lowercase(vl, wt, w, n):
    """Maximum value obtainable with capacity `w` from `n` items.

    `vl` are item values and `wt` the matching weights; items may be taken
    fractionally, so they are consumed greedily in decreasing value/weight
    order. The dump had a broken sort key (`lambda A_: x[0] / x[1]`) and a
    mangled `reverse=` flag; both are restored here.

    >>> lowercase([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort (value, weight) pairs by value density, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of weights
    k = bisect(acc, w)  # number of items that fit whole
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
1
"""LXMERT model configuration (attribute names restored from the obfuscated dump)."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class _A(PretrainedConfig):
    """Configuration holding the hyper-parameters of an LXMERT model."""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three layer stacks (language / cross-modality / vision),
        # hence num_hidden_layers is a dict rather than a single int.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
40
"""Check whether a 2-D system of forces is in static (rotational) equilibrium."""
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Convert a force in polar form (magnitude, angle) to Cartesian [Fx, Fy].

    `angle` is in degrees unless `radian_mode` is True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """True iff the net moment of `forces` applied at `location` is ~0 (within `eps`)."""
    # For 2-D vectors, cross() returns the scalar z-component, i.e. the
    # moment of each force about the origin.
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
40
1
"""Tests for transformers' GenerationConfig: (de)serialisation, model-config import, Hub push."""
import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class _A(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class _APushToHubTester(unittest.TestCase):
    # NOTE(review): the dump named both classes `_A`, so the second shadowed the
    # first at module level; a distinct name restores both test classes.

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
40
"""Kinetic energy of a body: E_k = 1/2 * m * v**2."""


def lowercase(mass: float, velocity: float) -> float:
    """Return the kinetic energy of a body of `mass` moving at `velocity`.

    Speed enters squared via abs(), so the sign of `velocity` is irrelevant.
    (The dump had both parameters named `A_`, which is a SyntaxError;
    meaningful names are restored here.)

    >>> lowercase(10, 10)
    500.0

    Raises:
        ValueError: if `mass` is negative.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
40
1
"""Single-bit manipulation helpers (the dump defined all five under one name,
so only the last survived; distinct names are restored)."""


def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` toggled."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """True iff the bit at `position` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` as 0 or 1."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
"""Tests for diffusers' check_dummies utility (dummy-object generation for missing backends)."""
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the dump lost the assignment target here; `PATH_TO_DIFFUSERS`
# is the module attribute upstream overrides — confirm against check_dummies.py.
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class _A(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
40
1
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def lowercase ( A_ , A_=1_000 )-> Optional[int]: '''simple docstring''' if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd a : Tuple = n - 1 a : int = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) a : Dict = 0 while count < prec: a : List[str] = random.randint(2 , n - 1 ) a : Union[str, Any] = bin_exp_mod(A_ , A_ , A_ ) if b != 1: a : List[str] = True for _ in range(A_ ): if b == n - 1: a : Optional[int] = False break a : Optional[Any] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": __lowercase = abs(int(input("""Enter bound : """).strip())) print("""Here's the list of primes:""") print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
40
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _A ( _a ): """simple docstring""" UpperCAmelCase : UNetaDModel UpperCAmelCase : ScoreSdeVeScheduler def __init__( self : Optional[int] , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : ScoreSdeVeScheduler): super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase) @torch.no_grad() def __call__( self : Dict , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 2000 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Optional[int] , ): a : int = self.unet.config.sample_size a : List[Any] = (batch_size, 3, img_size, img_size) a : str = self.unet a : Tuple = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase) * self.scheduler.init_noise_sigma a : List[Any] = sample.to(self.device) self.scheduler.set_timesteps(__UpperCAmelCase) self.scheduler.set_sigmas(__UpperCAmelCase) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): a : Optional[Any] = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): a : str = self.unet(__UpperCAmelCase , __UpperCAmelCase).sample a : Tuple = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase).prev_sample # prediction step a : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase).sample a : int = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase) a , a : List[Any] = output.prev_sample, output.prev_sample_mean a : int = sample_mean.clamp(0 , 1) a : Any = sample.cpu().permute(0 , 2 , 3 , 1).numpy() 
if output_type == "pil": a : str = self.numpy_to_pil(__UpperCAmelCase) if not return_dict: return (sample,) return ImagePipelineOutput(images=__UpperCAmelCase)
40
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __lowercase = NewType("""DataClass""", Any) __lowercase = NewType("""DataClassType""", Any) def lowercase ( A_ )-> List[str]: '''simple docstring''' if isinstance(A_ , A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def lowercase ( A_ )-> Callable[[str], Any]: '''simple docstring''' a : Tuple = {str(A_ ): choice for choice in choices} return lambda A_ : str_to_choice.get(A_ , A_ ) def lowercase ( *, A_ = None , A_ = None , A_ = dataclasses.MISSING , A_ = dataclasses.MISSING , A_ = None , **A_ , )-> dataclasses.Field: '''simple docstring''' if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls a : List[Any] = {} if aliases is not None: a : Optional[Any] = aliases if help is not None: a : Optional[int] = help return dataclasses.field(metadata=A_ , default=A_ , default_factory=A_ , **A_ ) class _A ( _a ): """simple docstring""" UpperCAmelCase : Iterable[DataClassType] def __init__( self : Optional[int] , __UpperCAmelCase : Union[DataClassType, Iterable[DataClassType]] , **__UpperCAmelCase : Union[str, Any]): # To make the default appear when using --help if "formatter_class" not in kwargs: a : Optional[Any] = ArgumentDefaultsHelpFormatter super().__init__(**__UpperCAmelCase) if dataclasses.is_dataclass(__UpperCAmelCase): a : List[Any] = [dataclass_types] a : Optional[int] = 
list(__UpperCAmelCase) for dtype in self.dataclass_types: self._add_dataclass_arguments(__UpperCAmelCase) @staticmethod def __snake_case ( __UpperCAmelCase : ArgumentParser , __UpperCAmelCase : dataclasses.Field): a : List[str] = f'''--{field.name}''' a : int = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , __UpperCAmelCase): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default") a : Optional[Any] = kwargs.pop("aliases" , []) if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Dict = [aliases] a : Union[str, Any] = getattr(field.type , "__origin__" , field.type) if origin_type is Union or (hasattr(__UpperCAmelCase , "UnionType") and isinstance(__UpperCAmelCase , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(__UpperCAmelCase) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
f''' Problem encountered in field \'{field.name}\'.''') if type(__UpperCAmelCase) not in field.type.__args__: # filter `str` in Union a : Dict = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] a : List[str] = getattr(field.type , "__origin__" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) a : str = ( field.type.__args__[0] if isinstance(__UpperCAmelCase , field.type.__args__[1]) else field.type.__args__[1] ) a : str = getattr(field.type , "__origin__" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) a : List[Any] = {} if origin_type is Literal or (isinstance(field.type , __UpperCAmelCase) and issubclass(field.type , __UpperCAmelCase)): if origin_type is Literal: a : Dict = field.type.__args__ else: a : Dict = [x.value for x in field.type] a : int = make_choice_type_function(kwargs["choices"]) if field.default is not dataclasses.MISSING: a : Optional[Any] = field.default else: a : Optional[int] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument a : int = copy(__UpperCAmelCase) # Hack because type=bool in argparse does not behave as we want. a : Optional[int] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. a : List[Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way a : Optional[Any] = default # This tells argparse we accept 0 or 1 value after --field_name a : Any = "?" 
# This is the value that will get picked if we do --field_name (without value) a : List[Any] = True elif isclass(__UpperCAmelCase) and issubclass(__UpperCAmelCase , __UpperCAmelCase): a : List[str] = field.type.__args__[0] a : Optional[int] = "+" if field.default_factory is not dataclasses.MISSING: a : List[Any] = field.default_factory() elif field.default is dataclasses.MISSING: a : Optional[Any] = True else: a : Tuple = field.type if field.default is not dataclasses.MISSING: a : List[Any] = field.default elif field.default_factory is not dataclasses.MISSING: a : str = field.default_factory() else: a : List[str] = True parser.add_argument(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): a : Any = False parser.add_argument(f'''--no_{field.name}''' , action="store_false" , dest=field.name , **__UpperCAmelCase) def __snake_case ( self : Dict , __UpperCAmelCase : DataClassType): if hasattr(__UpperCAmelCase , "_argument_group_name"): a : Tuple = self.add_argument_group(dtype._argument_group_name) else: a : Union[str, Any] = self try: a : Dict[str, type] = get_type_hints(__UpperCAmelCase) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. 
Try declaring the class in global scope or ''' "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__UpperCAmelCase): a : Tuple = ".".join(map(__UpperCAmelCase , sys.version_info[:3])) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. Try removing ''' "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`.") from ex raise for field in dataclasses.fields(__UpperCAmelCase): if not field.init: continue a : List[Any] = type_hints[field.name] self._parse_dataclass_field(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : str , __UpperCAmelCase : str=None , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Dict=True , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Dict=None , ): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): a : Optional[Any] = [] if args_filename: args_files.append(Path(__UpperCAmelCase)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(".args")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values a : List[str] = ArgumentParser() args_file_parser.add_argument(__UpperCAmelCase , type=__UpperCAmelCase , action="append") # Use only remaining args for further parsing (remove the args_file_flag) a , a : str = args_file_parser.parse_known_args(args=__UpperCAmelCase) a : List[str] = 
vars(__UpperCAmelCase).get(args_file_flag.lstrip("-") , __UpperCAmelCase) if cmd_args_file_paths: args_files.extend([Path(__UpperCAmelCase) for p in cmd_args_file_paths]) a : int = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last a : Dict = file_args + args if args is not None else file_args + sys.argv[1:] a , a : Tuple = self.parse_known_args(args=__UpperCAmelCase) a : str = [] for dtype in self.dataclass_types: a : Union[str, Any] = {f.name for f in dataclasses.fields(__UpperCAmelCase) if f.init} a : List[Any] = {k: v for k, v in vars(__UpperCAmelCase).items() if k in keys} for k in keys: delattr(__UpperCAmelCase , __UpperCAmelCase) a : Dict = dtype(**__UpperCAmelCase) outputs.append(__UpperCAmelCase) if len(namespace.__dict__) > 0: # additional namespace. outputs.append(__UpperCAmelCase) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''') return (*outputs,) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict[str, Any] , __UpperCAmelCase : bool = False): a : List[str] = set(args.keys()) a : Optional[Any] = [] for dtype in self.dataclass_types: a : Optional[int] = {f.name for f in dataclasses.fields(__UpperCAmelCase) if f.init} a : Tuple = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) a : Any = dtype(**__UpperCAmelCase) outputs.append(__UpperCAmelCase) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(__UpperCAmelCase)}''') return tuple(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : bool = False): with open(Path(__UpperCAmelCase) , 
encoding="utf-8") as open_json_file: a : Any = json.loads(open_json_file.read()) a : Optional[Any] = self.parse_dict(__UpperCAmelCase , allow_extra_keys=__UpperCAmelCase) return tuple(__UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : bool = False): a : Any = self.parse_dict(yaml.safe_load(Path(__UpperCAmelCase).read_text()) , allow_extra_keys=__UpperCAmelCase) return tuple(__UpperCAmelCase)
40
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
1
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 __lowercase = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 __lowercase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class _A : """simple docstring""" def __init__( self : Tuple): a : List[str] = WATERMARK_BITS a : int = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark) def __snake_case ( self : int , __UpperCAmelCase : torch.FloatTensor): # can't encode images that are smaller than 256 if images.shape[-1] < 256: return images a : Optional[Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy() a : str = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images] a : Any = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2) a : Any = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0) return images
40
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
1
"""simple docstring""" def lowercase ( A_ = 1_000 )-> int: '''simple docstring''' a : List[str] = 3 a : List[str] = 0 while a < n: if a % 3 == 0 or a % 5 == 0: result += a elif a % 15 == 0: result -= a a += 1 return result if __name__ == "__main__": print(f'''{solution() = }''')
40
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __lowercase = (720, 1280) # Height, Width __lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it. __lowercase = 1 / 100 __lowercase = """""" __lowercase = """""" __lowercase = """""" __lowercase = 250 def lowercase ( )-> None: '''simple docstring''' a , a : Optional[Any] = get_dataset(A_ , A_ ) for index in range(A_ ): a : Optional[int] = random.sample(range(len(A_ ) ) , 4 ) a , a , a : str = update_image_and_anno( A_ , A_ , A_ , A_ , A_ , filter_scale=A_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' a : str = random_chars(32 ) a : Optional[Any] = path.split(os.sep )[-1].rsplit("." , 1 )[0] a : Union[str, Any] = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}''' cva.imwrite(F'''{file_root}.jpg''' , A_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' ) a : str = [] for anno in new_annos: a : Union[str, Any] = anno[3] - anno[1] a : Any = anno[4] - anno[2] a : Any = anno[1] + width / 2 a : str = anno[2] + height / 2 a : Optional[Any] = F'''{anno[0]} {x_center} {y_center} {width} {height}''' annos_list.append(A_ ) with open(F'''{file_root}.txt''' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowercase ( A_ , A_ )-> tuple[list, list]: '''simple docstring''' a : Any = [] a : Dict = [] for label_file in glob.glob(os.path.join(A_ , "*.txt" ) ): a : Union[str, Any] = label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(A_ ) as in_file: a : Union[str, Any] = in_file.readlines() a : Tuple = os.path.join(A_ , F'''{label_name}.jpg''' ) a : Optional[Any] = [] for obj_list in obj_lists: a : str = obj_list.rstrip("\n" ).split(" " ) a : str = float(obj[1] ) - float(obj[3] ) / 2 a : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2 a : Optional[int] = float(obj[1] ) + float(obj[3] ) / 2 a : Optional[Any] = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(A_ ) labels.append(A_ ) return img_paths, labels def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ = 0.0 , )-> tuple[list, list, str]: '''simple docstring''' a : Union[str, Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) a : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) a : Union[str, Any] = int(scale_x * output_size[1] ) a : Tuple = int(scale_y * output_size[0] ) a : List[Any] = [] a : int = [] for i, index in enumerate(A_ ): a : Union[str, Any] = all_img_list[index] path_list.append(A_ ) a : List[str] = all_annos[index] a : Union[str, Any] = cva.imread(A_ ) if i == 0: # top-left a : Any = cva.resize(A_ , (divid_point_x, divid_point_y) ) a : Tuple = img for bbox in img_annos: a : List[Any] = bbox[1] * scale_x a : Dict = bbox[2] * scale_y a : List[str] = bbox[3] * scale_x a : Any = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right a : Any = cva.resize(A_ , (output_size[1] - divid_point_x, divid_point_y) ) a : int = img for bbox in img_annos: a : List[Any] = scale_x + bbox[1] * (1 - scale_x) a : int = bbox[2] * scale_y a : List[str] = scale_x + bbox[3] * (1 - scale_x) a : Union[str, Any] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left a : Optional[Any] = cva.resize(A_ , (divid_point_x, output_size[0] - 
divid_point_y) ) a : Optional[int] = img for bbox in img_annos: a : Any = bbox[1] * scale_x a : Tuple = scale_y + bbox[2] * (1 - scale_y) a : List[Any] = bbox[3] * scale_x a : Optional[Any] = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right a : int = cva.resize( A_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) a : List[Any] = img for bbox in img_annos: a : Tuple = scale_x + bbox[1] * (1 - scale_x) a : Dict = scale_y + bbox[2] * (1 - scale_y) a : Tuple = scale_x + bbox[3] * (1 - scale_x) a : List[str] = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: a : Dict = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowercase ( A_ )-> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" a : Optional[int] = ascii_lowercase + digits return "".join(random.choice(A_ ) for _ in range(A_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
40
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
"""Project Euler problem 43: sub-string divisibility of 0-9 pandigital numbers."""

from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Return True if the sub-string divisibility tests pass for ``num``.

    ``num`` is a tuple of the ten digits of a 0-9 pandigital number.  The
    three-digit groups d2d3d4, d3d4d5, ... d8d9d10 must be divisible by
    2, 3, 5, 7, 11, 13 and 17 respectively.
    """
    # d2d3d4 is divisible by 2 iff its last digit d4 is even.
    if num[3] % 2 != 0:
        return False
    # Digit-sum rule: d3d4d5 is divisible by 3 iff its digit sum is.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 is divisible by 5 iff d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        # Reassemble the three-digit group d(i+5)d(i+6)d(i+7) and test it.
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0..n-1 pandigital numbers with the property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
40
1
"""Check whether a string is an isogram (a word with no repeated letters)."""


def is_isogram(string: str) -> bool:
    """Return True if ``string`` contains no repeated letters.

    The comparison is case-insensitive.  Raises ValueError when the
    string contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    # Lowercase first so 'A' and 'a' count as the same letter; a set
    # collapses duplicates, so equal lengths mean every letter is unique.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
1
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = {"""vocab_file""": """spiece.model"""} __lowercase = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), } } __lowercase = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } class _A ( _a ): """simple docstring""" UpperCAmelCase : Dict = VOCAB_FILES_NAMES UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Any = ["""input_ids""", """attention_mask"""] UpperCAmelCase : List[int] = [] def __init__( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]="<unk>" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : List[Any]="[SEP]" , __UpperCAmelCase : int="[MASK]" , __UpperCAmelCase : Optional[Any]="[CLS]" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : int , ): a : str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else bos_token a : List[str] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else eos_token a : List[str] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , 
rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else unk_token a : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else pad_token a : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else cls_token a : str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else sep_token # Mask token behave like a normal word, i.e. include the space before it a : str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) a : Optional[int] = vocab_file a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(__UpperCAmelCase) @property def __snake_case ( self : Dict): return self.sp_model.get_piece_size() def __snake_case ( self : str): a : str = {self.convert_ids_to_tokens(__UpperCAmelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Optional[int]): a : Union[str, Any] = self.__dict__.copy() a : Union[str, Any] = None return state def __setstate__( self : Optional[Any] , __UpperCAmelCase : Dict): a : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): a : List[str] = {} a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def __snake_case ( self : Optional[Any] , 
__UpperCAmelCase : str): return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase) def __snake_case ( self : List[str] , __UpperCAmelCase : Optional[Any]): return self.sp_model.piece_to_id(__UpperCAmelCase) def __snake_case ( self : Any , __UpperCAmelCase : Optional[int]): a : int = self.sp_model.IdToPiece(__UpperCAmelCase) return token def __snake_case ( self : str , __UpperCAmelCase : Tuple): a : Dict = [] a : Optional[Any] = "" a : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase) + token a : Optional[Any] = True a : int = [] else: current_sub_tokens.append(__UpperCAmelCase) a : int = False out_string += self.sp_model.decode(__UpperCAmelCase) return out_string.strip() def __snake_case ( self : List[str] , __UpperCAmelCase : List[int] , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = None , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Tuple , ): a : Tuple = kwargs.pop("use_source_tokenizer" , __UpperCAmelCase) a : Any = self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 a : List[str] = [] a : List[str] = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase)) a : Tuple = [] sub_texts.append(__UpperCAmelCase) else: current_sub_text.append(__UpperCAmelCase) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase)) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: a : int = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(__UpperCAmelCase)) else: a : Dict = "".join(__UpperCAmelCase) a : List[str] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: a : Optional[int] = self.clean_up_tokenization(__UpperCAmelCase) return clean_text else: return text def __snake_case ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None): if not os.path.isdir(__UpperCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return a : Optional[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __UpperCAmelCase) elif not os.path.isfile(self.vocab_file): with open(__UpperCAmelCase , "wb") as fi: a : int = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase) return (out_vocab_file,) def __snake_case ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a : Optional[Any] = [self.cls_token_id] a : Dict = [self.sep_token_id] return cls + 
token_ids_a + sep + token_ids_a + sep def __snake_case ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase)) + [1] return [1] + ([0] * len(__UpperCAmelCase)) + [1] + ([0] * len(__UpperCAmelCase)) + [1] def __snake_case ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): a : str = [self.sep_token_id] a : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
40
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> Dict: '''simple docstring''' a : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: a : Union[str, Any] = 128 elif "12-12" in model_name: a : List[Any] = 12 a : str = 12 elif "14-14" in model_name: a : List[Any] = 14 a : Optional[int] = 14 elif "16-16" in model_name: a : Any = 16 a : List[Any] = 16 else: raise ValueError("Model not supported" ) a : Optional[int] = "huggingface/label-files" if "speech-commands" in model_name: a : Optional[int] = 35 a : List[str] = "speech-commands-v2-id2label.json" else: a : Optional[Any] = 527 a : Tuple = "audioset-id2label.json" a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()} a : Any = idalabel a : str = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ )-> Tuple: '''simple docstring''' if "module.v" in name: a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" ) if "cls_token" in name: a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" ) if "dist_token" in name: a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" ) if "pos_embed" in name: a : str = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) # transformer blocks if "blocks" in name: a : Union[str, Any] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : str = name.replace("attn.proj" , 
"attention.output.dense" ) if "attn" in name: a : Tuple = name.replace("attn" , "attention.self" ) if "norm1" in name: a : int = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" ) # classifier head if "module.mlp_head.0" in name: a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" ) if "module.mlp_head.1" in name: a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" ) return name def lowercase ( A_ , A_ )-> Any: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : str = orig_state_dict.pop(A_ ) if "qkv" in key: a : int = key.split("." 
) a : Optional[int] = int(key_split[3] ) a : int = config.hidden_size if "weight" in key: a : List[str] = val[:dim, :] a : Any = val[dim : dim * 2, :] a : int = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : Union[str, Any] = val[dim : dim * 2] a : str = val[-dim:] else: a : str = val return orig_state_dict def lowercase ( A_ )-> Dict: '''simple docstring''' a : Union[str, Any] = [ "module.v.head.weight", "module.v.head.bias", "module.v.head_dist.weight", "module.v.head_dist.bias", ] for k in ignore_keys: state_dict.pop(A_ , A_ ) @torch.no_grad() def lowercase ( A_ , A_ , A_=False )-> Optional[int]: '''simple docstring''' a : Optional[int] = get_audio_spectrogram_transformer_config(A_ ) a : Dict = { "ast-finetuned-audioset-10-10-0.4593": ( "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.450": ( "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448": ( "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1" ), "ast-finetuned-audioset-10-10-0.448-v2": ( "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1" ), "ast-finetuned-audioset-12-12-0.447": ( "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1" ), "ast-finetuned-audioset-14-14-0.443": ( "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1" ), "ast-finetuned-audioset-16-16-0.442": ( "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1" ), "ast-finetuned-speech-commands-v2": ( "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1" ), } # load original state_dict a : Any = model_name_to_url[model_name] a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" ) # remove some keys remove_keys(A_ ) # rename some keys a : Union[str, Any] = convert_state_dict(A_ , A_ ) # load 🤗 model a : List[str] = ASTForAudioClassification(A_ ) model.eval() 
model.load_state_dict(A_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8 a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6 a : str = 1_024 if "speech-commands" not in model_name else 128 a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ ) if "speech-commands" in model_name: a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" ) a : int = dataset[0]["audio"]["array"] else: a : Tuple = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , ) a , a : Tuple = torchaudio.load(A_ ) a : Optional[Any] = waveform.squeeze().numpy() a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" ) # forward pass a : Optional[Any] = model(**A_ ) a : List[str] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": a : Union[str, Any] = torch.tensor([6.1_5_8_9, 
-8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("Unknown model name" ) if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ): raise ValueError("Logits don't match" ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(A_ ) if push_to_hub: print("Pushing model and feature extractor to the hub..." ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
1
"""Randomized quickselect: find the index-th smallest element of a list."""
import random


def _partition(data, pivot):
    """Three-way partition of *data* around *pivot*.

    Returns a tuple ``(less, equal, greater)`` of lists holding the elements
    smaller than, equal to, and greater than the pivot.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the element that would be at position *index* if *items* were sorted.

    Runs in expected O(n) time via a random pivot. Returns ``None`` when
    *index* is out of range (covers the empty-list case too).
    """
    if index >= len(items) or index < 0:
        return None
    # Random pivot keeps the expected running time linear on adversarial input.
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
40
"""Lazy import structure for the RAG model family (config, retriever, tokenizer, models)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> public names it exports; extended below per available backend.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the module is lazy.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
1
"""Tokenizer wrapper for RAG: pairs a question-encoder tokenizer with a generator tokenizer."""
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class _A:
    """Holds two tokenizers and dispatches calls to whichever mode is active.

    ``current_tokenizer`` starts as the question encoder (input mode); the
    ``_switch_to_*`` hooks flip it to the generator for target encoding.
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Input mode by default: questions are encoded until _switch_to_target_mode().
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both tokenizers under dedicated subfolders of *save_directory*."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from their subfolders, resolving configs via RagConfig."""
        # dynamically import AutoTokenizer (avoids a circular import at module load)
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Delegate to whichever tokenizer the current mode selects.
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated: tokenize sources (and optionally targets) into one batch with labels."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
1
"""Lazy import structure for LayoutXLM (processor plus slow/fast tokenizers)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names; tokenizer entries added per available backend.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the module is lazy.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
"""Check whether a string is an isogram (no repeated letters, case-insensitive)."""


def is_isogram(string: str) -> bool:
    """Return True when *string* contains no repeated letters, ignoring case.

    Raises:
        ValueError: if *string* contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    # Lower-case first so 'A' and 'a' count as the same letter.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
40
1
"""Configuration for DPR (Dense Passage Retrieval) encoder and reader models."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class _A(PretrainedConfig):
    """Configuration shared by the DPR question encoder, context encoder and reader.

    DPR reuses a BERT-style backbone, hence the BERT-like hyper-parameters;
    ``projection_dim`` adds an optional output projection on top of the encoder.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # 0 means no extra projection layer on the encoder output.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
40
"""Parquet-backed dataset builder for 🤗 ``datasets``."""
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet.

    batch_size: rows per yielded Arrow table; columns: optional projection;
    features: optional explicit schema (otherwise inferred from the files).
    """

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class _A(datasets.ArrowBasedBuilder):
    """Streams record batches out of one or more parquet files."""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict data_files; one split per dict key."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, table) pairs, one per record batch per file."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column projection must match the declared features exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
40
1
"""Keep the model support table in ``docs/source/en/index.md`` in sync with the auto modules.

Run from the repo root: ``python utils/check_table.py`` (add ``--fix_and_overwrite``
to rewrite the table in place).
"""
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return (text, start_index, end_index, lines) for the span between the two prompts."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Trim blank lines on both ends of the span.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCase *identifier* into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center *text* in a cell *width* characters wide (✅/❌ count as width 2)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Build the markdown support table from the auto-module name mappings."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the table in ``index.md`` is current; rewrite it when *overwrite* is True."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
40
"""Configuration for DPR (Dense Passage Retrieval) encoder and reader models."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class _A(PretrainedConfig):
    """Configuration shared by the DPR question encoder, context encoder and reader.

    DPR reuses a BERT-style backbone, hence the BERT-like hyper-parameters;
    ``projection_dim`` adds an optional output projection on top of the encoder.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # 0 means no extra projection layer on the encoder output.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
40
1
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name def lowercase ( A_ )-> List[Any]: '''simple docstring''' warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" , A_ , ) if isinstance(A_ , torch.Tensor ): return image elif isinstance(A_ , PIL.Image.Image ): a : Optional[Any] = [image] if isinstance(image[0] , PIL.Image.Image ): a , a : List[str] = image[0].size a , a : List[Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 a : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] a : Optional[int] = np.concatenate(A_ , axis=0 ) a : Tuple = np.array(A_ ).astype(np.floataa ) / 2_5_5.0 a : Any = image.transpose(0 , 3 , 1 , 2 ) a : Any = 2.0 * image - 1.0 a : Union[str, Any] = torch.from_numpy(A_ ) elif isinstance(image[0] , torch.Tensor ): a : int = torch.cat(A_ , dim=0 ) return image def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , torch.Tensor ): return mask elif isinstance(A_ , PIL.Image.Image ): a : Dict = [mask] if isinstance(mask[0] , PIL.Image.Image ): a , a : str = mask[0].size a , a : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 a : List[str] = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] a : List[Any] = np.concatenate(A_ , axis=0 ) a : Optional[int] = mask.astype(np.floataa ) / 2_5_5.0 a : Optional[Any] = 0 a : Dict = 1 a : List[str] = torch.from_numpy(A_ ) elif isinstance(mask[0] , torch.Tensor ): a : int = torch.cat(A_ , dim=0 ) return mask 
class _A(DiffusionPipeline):
    """Pipeline for image inpainting using the RePaint resampling schedule.

    Holds a ``unet`` denoiser and a ``RePaintScheduler``; ``__call__`` runs the
    forward/backward jump schedule, repainting the masked region of ``image``.
    """

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Inpaint ``image`` where ``mask_image`` is 0.

        Args:
            image: The original image to be partially regenerated.
            mask_image: Binary mask (1 = keep, 0 = repaint).
            num_inference_steps: Number of denoising steps.
            eta: DDIM-style noise weight (0 = DDIM, 1 = DDPM).
            jump_length: Steps taken forward in time before jumping back
                ("j" in the RePaint paper).
            jump_n_sample: How many times to resample each jump ("r").
            generator: RNG(s) for reproducible sampling.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: Return an ``ImagePipelineOutput`` instead of a tuple.
        """
        original_image = _preprocess_image(image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values (RePaint scheduler also needs the jump parameters)
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        # t_last > timesteps[0] guarantees the first iteration takes the denoise branch
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t (the RePaint "jump back")
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
40
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""Unit tests for the Flax logits processors/warpers used during generation."""
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class _A(unittest.TestCase):
    """Behavioural tests for each Flax logits processor and their composition."""

    def _get_uniform_logits(self, batch_size, length):
        # Uniform distribution: every token has probability 1/length.
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id shold be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
40
"""Unit tests for the 0/1 ``knapsack`` implementation."""
import unittest

from knapsack import knapsack as k


class _A(unittest.TestCase):
    """Tests ``k.knapsack(capacity, weights, values, count)`` -> max profit."""

    def test_base_case(self):
        # Zero capacity always yields zero profit, regardless of the items.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        # Best pick within capacity 3 is items weighing 2 + 1 -> profit 2 + 3 = 5.
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        # Classic textbook instance: optimal profit is 100 + 120 = 220.
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
40
1
"""Unit tests for the OWL-ViT processor (CLIP tokenizer + image processor glue)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class _A(unittest.TestCase):
    """Round-trip and input-handling tests for ``OwlViTProcessor``."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # Minimal BPE vocab/merges so a CLIP tokenizer can be built locally.
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
40
"""Tokenization tests for LayoutLM (slow and fast tokenizers)."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class _A(TokenizerTesterMixin, unittest.TestCase):
    """Runs the common tokenizer test-suite against LayoutLM's WordPiece tokenizer."""

    # Configuration consumed by TokenizerTesterMixin.
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Tiny WordPiece vocab sufficient for the fixtures below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
40
1
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _A ( unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase : Any = TF_MODEL_FOR_MASKED_LM_MAPPING def __snake_case ( self : Tuple): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def __snake_case ( self : Optional[int]): a : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf") a : Any = unmasker("My name is <mask>") self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ] , ) a : Any = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser", }, ] , ) a : Dict = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, 
"token": 2941, "token_str": " Te"}, ] , ) @require_torch def __snake_case ( self : Optional[int]): a : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt") a : Optional[Any] = unmasker("My name is <mask>") self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ] , ) a : Dict = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ] , ) a : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, ] , ) a : Optional[int] = unmasker("My name is <mask> <mask>" , top_k=2) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=6) , [ [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ] , ) @require_torch_gpu def __snake_case ( self : int): a : int = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , 
device=0 , framework="pt") # convert model to fp16 pipe.model.half() a : Tuple = pipe("Paris is the [MASK] of France.") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase) @slow @require_torch def __snake_case ( self : Dict): a : Any = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt") self.run_large_test(__UpperCAmelCase) @slow @require_tf def __snake_case ( self : Optional[int]): a : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf") self.run_large_test(__UpperCAmelCase) def __snake_case ( self : int , __UpperCAmelCase : Optional[int]): a : Tuple = unmasker("My name is <mask>") self.assertEqual( nested_simplify(__UpperCAmelCase) , [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ] , ) a : List[str] = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(__UpperCAmelCase) , [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon", }, ] , ) a : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3) self.assertEqual( nested_simplify(__UpperCAmelCase) , [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ] , ) @require_torch def __snake_case ( self : Union[str, Any]): a : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt") a : Dict = None 
a : str = None self.run_pipeline_test(__UpperCAmelCase , []) @require_tf def __snake_case ( self : Tuple): a : Optional[int] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf") a : Tuple = None a : Optional[Any] = None self.run_pipeline_test(__UpperCAmelCase , []) def __snake_case ( self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)") a : Optional[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) a : Dict = [ f'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def __snake_case ( self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]): a : Union[str, Any] = fill_masker.tokenizer a : Union[str, Any] = fill_masker.model a : Union[str, Any] = fill_masker( f'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( __UpperCAmelCase , [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ] , ) a : List[Any] = fill_masker([f'''This is a {tokenizer.mask_token}''']) self.assertEqual( __UpperCAmelCase , [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": 
ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ] , ) a : List[str] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.''']) self.assertEqual( __UpperCAmelCase , [ [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ], [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": 
ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ], ] , ) with self.assertRaises(__UpperCAmelCase): fill_masker([None]) # No mask_token is not supported with self.assertRaises(__UpperCAmelCase): fill_masker("This is") self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase) self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase) self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase) self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase) self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : int): a : str = tokenizer.get_vocab() a : Any = sorted(vocab.keys())[:2] # Pipeline argument a : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase) a : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''') self.assertEqual( __UpperCAmelCase , [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ] , ) a : int = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , __UpperCAmelCase) a : Tuple = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(__UpperCAmelCase)) # Call argument a : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) a : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__UpperCAmelCase) self.assertEqual( __UpperCAmelCase , [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": 
ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ] , ) a : Any = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs} , __UpperCAmelCase) a : List[Any] = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs} , set(__UpperCAmelCase)) # Score equivalence a : Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__UpperCAmelCase) a : Optional[Any] = [top_mask["token_str"] for top_mask in outputs] a : List[Any] = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(__UpperCAmelCase) == set(__UpperCAmelCase): a : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__UpperCAmelCase) a : List[str] = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(__UpperCAmelCase) , nested_simplify(__UpperCAmelCase)) # Raises with invalid with self.assertRaises(__UpperCAmelCase): a : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(__UpperCAmelCase): a : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""]) with self.assertRaises(__UpperCAmelCase): a : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="") def __snake_case ( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str): a : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2) a : str = fill_masker(f'''This is a {tokenizer.mask_token}''') self.assertEqual( __UpperCAmelCase , [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": 
ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ] , ) a : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) a : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2) self.assertEqual( __UpperCAmelCase , [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ] , ) self.assertEqual(nested_simplify(__UpperCAmelCase) , nested_simplify(__UpperCAmelCase)) def __snake_case ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str): a : Tuple = tokenizer.get_vocab() a : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) # top_k=2, ntargets=3 a : Dict = sorted(vocab.keys())[:3] a : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=__UpperCAmelCase) # If we use the most probably targets, and filter differently, we should still # have the same results a : Union[str, Any] = [el["token_str"] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase: x["score"] , reverse=__UpperCAmelCase)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(__UpperCAmelCase).issubset(__UpperCAmelCase): a : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=__UpperCAmelCase) # They should yield exactly the same result self.assertEqual(nested_simplify(__UpperCAmelCase) , nested_simplify(__UpperCAmelCase)) def __snake_case ( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : Any): a : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) a : str = tokenizer.get_vocab() # String duplicates + id duplicates a : List[Any] = sorted(vocab.keys())[:3] a : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]] a : Union[str, Any] = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=__UpperCAmelCase , top_k=10) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(__UpperCAmelCase) , 3) def __snake_case ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]): a : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase) a : Tuple = fill_masker( f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2) self.assertEqual( __UpperCAmelCase , [ [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ], [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ], [ {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, {"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": 
ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)}, ], ] , )
40
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import argparse import os import re __lowercase = """src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict __lowercase = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings __lowercase = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""") def lowercase ( A_ , A_ = False )-> Dict: '''simple docstring''' with open(A_ , "r" , encoding="utf-8" ) as f: a : List[str] = f.read() a : Union[str, Any] = content.split("\n" ) a : Any = [] a : Tuple = 0 while line_idx < len(A_ ): if _re_intro_mapping.search(lines[line_idx] ) is not None: a : str = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 a : int = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": a : Any = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers a : Tuple = sorted(A_ , key=lambda A_ : _re_identifier.search(A_ ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(A_ , "w" , encoding="utf-8" ) as f: f.write("\n".join(A_ ) ) elif "\n".join(A_ ) != content: return True def lowercase ( A_ = False )-> List[str]: '''simple docstring''' a : Dict = [os.path.join(A_ , A_ ) for f in os.listdir(A_ ) if f.endswith(".py" )] a : int = [sort_auto_mapping(A_ , overwrite=A_ ) for fname in fnames] if not overwrite and any(A_ ): a : Any = [f for f, d in zip(A_ , A_ ) if d] raise ValueError( F'''The following files have auto mappings that need sorting: {", ".join(A_ )}. Run `make style` to fix''' " this." 
) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") __lowercase = parser.parse_args() sort_all_auto_mappings(not args.check_only)
40
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
1
"""simple docstring""" def lowercase ( A_ )-> int: '''simple docstring''' a : int = hex_num.strip() if not hex_num: raise ValueError("No value was passed to the function" ) a : str = hex_num[0] == "-" if is_negative: a : str = hex_num[1:] try: a : int = int(A_ , 16 ) except ValueError: raise ValueError("Invalid value was passed to the function" ) a : str = "" while int_num > 0: a : List[Any] = str(int_num % 2 ) + bin_str int_num >>= 1 return int(("-" + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowercase = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """WavLMForAudioFrameClassification""", """WavLMForCTC""", """WavLMForSequenceClassification""", """WavLMForXVector""", """WavLMModel""", """WavLMPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavlm import ( WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST, WavLMForAudioFrameClassification, WavLMForCTC, WavLMForSequenceClassification, WavLMForXVector, WavLMModel, WavLMPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
1
"""simple docstring""" from __future__ import annotations from typing import Any class _A : """simple docstring""" def __init__( self : Optional[int] , __UpperCAmelCase : int = 6): a : Node | None = None a : Node | None = None self.create_linked_list(__UpperCAmelCase) def __snake_case ( self : Optional[int] , __UpperCAmelCase : int): a : Dict = Node() a : List[Any] = current_node a : str = current_node a : Dict = current_node for _ in range(1 , __UpperCAmelCase): a : int = Node() a : Any = current_node a : int = previous_node a : Optional[int] = current_node a : Optional[int] = self.front a : Union[str, Any] = previous_node def __snake_case ( self : str): return ( self.front == self.rear and self.front is not None and self.front.data is None ) def __snake_case ( self : Union[str, Any]): self.check_can_perform_operation() return self.front.data if self.front else None def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Any): if self.rear is None: return self.check_is_full() if not self.is_empty(): a : Optional[int] = self.rear.next if self.rear: a : List[str] = data def __snake_case ( self : List[Any]): self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: a : Optional[Any] = self.front.data a : Tuple = None return data a : Optional[Any] = self.front a : Optional[Any] = old_front.next a : Dict = old_front.data a : str = None return data def __snake_case ( self : Any): if self.is_empty(): raise Exception("Empty Queue") def __snake_case ( self : int): if self.rear and self.rear.next == self.front: raise Exception("Full Queue") class _A : """simple docstring""" def __init__( self : Dict): a : Any | None = None a : Node | None = None a : Node | None = None if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
1
"""simple docstring""" from __future__ import annotations def lowercase ( A_ )-> None: '''simple docstring''' create_state_space_tree(A_ , [] , 0 , [0 for i in range(len(A_ ) )] ) def lowercase ( A_ , A_ , A_ , A_ , )-> None: '''simple docstring''' if index == len(A_ ): print(A_ ) return for i in range(len(A_ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) a : Dict = True create_state_space_tree(A_ , A_ , index + 1 , A_ ) current_sequence.pop() a : int = False __lowercase = [3, 1, 2, 4] generate_all_permutations(sequence) __lowercase = ["A", "B", "C"] generate_all_permutations(sequence_a)
40
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values __lowercase = argparse.ArgumentParser() parser.add_argument("""--user""", type=str, default="""ubuntu""") parser.add_argument("""--host""", type=str, default="""localhost""") parser.add_argument("""--key_path""", type=str, default=None) parser.add_argument("""--instance""", type=str, default="""V100:1""") parser.add_argument("""--provider""", type=str, default="""cheapest""") parser.add_argument("""--use_spot""", type=bool, default=False) parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""") __lowercase , __lowercase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("""Cannot specify both BYO and on-demand cluster args""") __lowercase = rh.cluster( name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path} ) else: __lowercase = rh.cluster( name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) __lowercase = args.example.rsplit("""/""", 1)[0] # Set up remote environment cluster.install_packages(["""pip:./"""]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) 
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
40
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
1
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Union[str, Any] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} UpperCAmelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} ) UpperCAmelCase : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS def __snake_case ( self : Tuple): torch.manual_seed(0) a : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) torch.manual_seed(0) a : Dict = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , 
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0) a : Union[str, Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0) a : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0) a : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) a : str = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : str = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict=0): if str(__UpperCAmelCase).startswith("mps"): a : str = torch.manual_seed(__UpperCAmelCase) else: a : Union[str, Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : List[Any] = 2 a : List[str] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__UpperCAmelCase , device=torch.device(__UpperCAmelCase) , ) a : List[str] = floats_tensor(control_image.shape , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : Dict = image.cpu().permute(0 , 2 , 3 , 1)[0] a : List[Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", 
"generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def __snake_case ( self : str): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __snake_case ( self : Optional[int]): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def __snake_case ( self : str): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class _A ( _a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Optional[Any] = StableDiffusionControlNetImgaImgPipeline UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} UpperCAmelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __snake_case ( self : Dict): torch.manual_seed(0) a : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) torch.manual_seed(0) def init_weights(__UpperCAmelCase : Dict): if isinstance(__UpperCAmelCase , torch.nn.Convad): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) a : Union[str, Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__UpperCAmelCase) torch.manual_seed(0) a : Optional[Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , 
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__UpperCAmelCase) torch.manual_seed(0) a : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0) a : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0) a : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) a : Optional[Any] = CLIPTextModel(__UpperCAmelCase) a : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Any = MultiControlNetModel([controlneta, controlneta]) a : str = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any]=0): if str(__UpperCAmelCase).startswith("mps"): a : Dict = torch.manual_seed(__UpperCAmelCase) else: a : Dict = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : List[Any] = 2 a : Optional[int] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__UpperCAmelCase , device=torch.device(__UpperCAmelCase) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__UpperCAmelCase , device=torch.device(__UpperCAmelCase) , ), ] a : Optional[int] = 
floats_tensor(control_image[0].shape , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : str = image.cpu().permute(0 , 2 , 3 , 1)[0] a : List[str] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def __snake_case ( self : Optional[int]): a : Tuple = self.get_dummy_components() a : Union[str, Any] = self.pipeline_class(**__UpperCAmelCase) pipe.to(__UpperCAmelCase) a : int = 10.0 a : Union[str, Any] = 4 a : List[Any] = self.get_dummy_inputs(__UpperCAmelCase) a : Any = steps a : Tuple = scale a : Dict = pipe(**__UpperCAmelCase)[0] a : Union[str, Any] = self.get_dummy_inputs(__UpperCAmelCase) a : int = steps a : Optional[Any] = scale a : int = pipe(**__UpperCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2)[0] a : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase) a : int = steps a : str = scale a : List[str] = pipe(**__UpperCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0] a : str = self.get_dummy_inputs(__UpperCAmelCase) a : Any = steps a : Any = scale a : List[str] = pipe(**__UpperCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a)) > 1e-3 assert np.sum(np.abs(output_a - output_a)) > 1e-3 assert np.sum(np.abs(output_a - output_a)) > 1e-3 def __snake_case ( self : Tuple): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __snake_case ( self : Optional[Any]): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def __snake_case ( self : 
List[str]): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def __snake_case ( self : Union[str, Any]): a : List[Any] = self.get_dummy_components() a : Optional[Any] = self.pipeline_class(**__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__UpperCAmelCase) except NotImplementedError: pass @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[str]): super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Union[str, Any]): a : Optional[int] = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") a : int = StableDiffusionControlNetImgaImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , safety_checker=__UpperCAmelCase , controlnet=__UpperCAmelCase) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : List[str] = torch.Generator(device="cpu").manual_seed(0) a : List[str] = "evil space-punk bird" a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512)) a : int = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512)) a : Tuple = pipe( __UpperCAmelCase , __UpperCAmelCase , control_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , num_inference_steps=50 , strength=0.6 , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) a : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy") assert np.abs(expected_image - image).max() < 9e-2
40
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
1
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __lowercase = pd.read_csv( """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/""" """position_salaries.csv""" ) __lowercase = dataset.iloc[:, 1:2].values __lowercase = dataset.iloc[:, 2].values __lowercase , __lowercase , __lowercase , __lowercase = train_test_split(X, y, test_size=0.2, random_state=0) __lowercase = PolynomialFeatures(degree=4) __lowercase = poly_reg.fit_transform(X) __lowercase = LinearRegression() pol_reg.fit(X_poly, y) def lowercase ( )-> List[Any]: '''simple docstring''' plt.scatter(A_ , A_ , color="red" ) plt.plot(A_ , pol_reg.predict(poly_reg.fit_transform(A_ ) ) , color="blue" ) plt.title("Truth or Bluff (Linear Regression)" ) plt.xlabel("Position level" ) plt.ylabel("Salary" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
40
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
1
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def lowercase ( A_ )-> Optional[int]: '''simple docstring''' if not is_accelerate_available(): return method a : Any = version.parse(accelerate.__version__ ).base_version if version.parse(A_ ) < version.parse("0.17.0" ): return method def wrapper(self , *A_ , **A_ ): if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ): self._hf_hook.pre_forward(self ) return method(self , *A_ , **A_ ) return wrapper
40
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
1
"""simple docstring""" import argparse import struct import unittest class _A : """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : bytes): a : Tuple = data # Initialize hash values a : Union[str, Any] = [ 0x6_A_0_9_E_6_6_7, 0xB_B_6_7_A_E_8_5, 0x3_C_6_E_F_3_7_2, 0xA_5_4_F_F_5_3_A, 0x5_1_0_E_5_2_7_F, 0x9_B_0_5_6_8_8_C, 0x1_F_8_3_D_9_A_B, 0x5_B_E_0_C_D_1_9, ] # Initialize round constants a : Optional[int] = [ 0x4_2_8_A_2_F_9_8, 0x7_1_3_7_4_4_9_1, 0xB_5_C_0_F_B_C_F, 0xE_9_B_5_D_B_A_5, 0x3_9_5_6_C_2_5_B, 0x5_9_F_1_1_1_F_1, 0x9_2_3_F_8_2_A_4, 0xA_B_1_C_5_E_D_5, 0xD_8_0_7_A_A_9_8, 0x1_2_8_3_5_B_0_1, 0x2_4_3_1_8_5_B_E, 0x5_5_0_C_7_D_C_3, 0x7_2_B_E_5_D_7_4, 0x8_0_D_E_B_1_F_E, 0x9_B_D_C_0_6_A_7, 0xC_1_9_B_F_1_7_4, 0xE_4_9_B_6_9_C_1, 0xE_F_B_E_4_7_8_6, 0x0_F_C_1_9_D_C_6, 0x2_4_0_C_A_1_C_C, 0x2_D_E_9_2_C_6_F, 0x4_A_7_4_8_4_A_A, 0x5_C_B_0_A_9_D_C, 0x7_6_F_9_8_8_D_A, 0x9_8_3_E_5_1_5_2, 0xA_8_3_1_C_6_6_D, 0xB_0_0_3_2_7_C_8, 0xB_F_5_9_7_F_C_7, 0xC_6_E_0_0_B_F_3, 0xD_5_A_7_9_1_4_7, 0x0_6_C_A_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_B_7_0_A_8_5, 0x2_E_1_B_2_1_3_8, 0x4_D_2_C_6_D_F_C, 0x5_3_3_8_0_D_1_3, 0x6_5_0_A_7_3_5_4, 0x7_6_6_A_0_A_B_B, 0x8_1_C_2_C_9_2_E, 0x9_2_7_2_2_C_8_5, 0xA_2_B_F_E_8_A_1, 0xA_8_1_A_6_6_4_B, 0xC_2_4_B_8_B_7_0, 0xC_7_6_C_5_1_A_3, 0xD_1_9_2_E_8_1_9, 0xD_6_9_9_0_6_2_4, 0xF_4_0_E_3_5_8_5, 0x1_0_6_A_A_0_7_0, 0x1_9_A_4_C_1_1_6, 0x1_E_3_7_6_C_0_8, 0x2_7_4_8_7_7_4_C, 0x3_4_B_0_B_C_B_5, 0x3_9_1_C_0_C_B_3, 0x4_E_D_8_A_A_4_A, 0x5_B_9_C_C_A_4_F, 0x6_8_2_E_6_F_F_3, 0x7_4_8_F_8_2_E_E, 0x7_8_A_5_6_3_6_F, 0x8_4_C_8_7_8_1_4, 0x8_C_C_7_0_2_0_8, 0x9_0_B_E_F_F_F_A, 0xA_4_5_0_6_C_E_B, 0xB_E_F_9_A_3_F_7, 0xC_6_7_1_7_8_F_2, ] a : Tuple = self.preprocessing(self.data) self.final_hash() @staticmethod def __snake_case ( __UpperCAmelCase : bytes): a : List[str] = B"\x80" + (B"\x00" * (63 - (len(__UpperCAmelCase) + 8) % 64)) a : Dict = struct.pack(">Q" , (len(__UpperCAmelCase) * 8)) return data + padding + big_endian_integer def __snake_case ( self : Any): # Convert into 
blocks of 64 bytes a : List[str] = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data) , 64) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers a : Optional[Any] = list(struct.unpack(">16L" , __UpperCAmelCase)) # add 48 0-ed integers words += [0] * 48 a , a , a , a , a , a , a , a : Dict = self.hashes for index in range(0 , 64): if index > 15: # modify the zero-ed indexes at the end of the array a : List[Any] = ( self.ror(words[index - 15] , 7) ^ self.ror(words[index - 15] , 18) ^ (words[index - 15] >> 3) ) a : Optional[Any] = ( self.ror(words[index - 2] , 17) ^ self.ror(words[index - 2] , 19) ^ (words[index - 2] >> 10) ) a : Tuple = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression a : Union[str, Any] = self.ror(__UpperCAmelCase , 6) ^ self.ror(__UpperCAmelCase , 11) ^ self.ror(__UpperCAmelCase , 25) a : str = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g) a : Tuple = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 a : List[Any] = self.ror(__UpperCAmelCase , 2) ^ self.ror(__UpperCAmelCase , 13) ^ self.ror(__UpperCAmelCase , 22) a : Union[str, Any] = (a & b) ^ (a & c) ^ (b & c) a : List[Any] = (sa + maj) % 0x1_0_0_0_0_0_0_0_0 a , a , a , a , a , a , a , a : Optional[Any] = ( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0), ) a : Dict = [a, b, c, d, e, f, g, h] # Modify final values a : str = [ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes) ] a : Dict = "".join([hex(__UpperCAmelCase)[2:].zfill(8) for value in self.hashes]) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): import hashlib a : int = bytes("Test String" , 
"utf-8") self.assertEqual(SHAaaa(__UpperCAmelCase).hash , hashlib.shaaaa(__UpperCAmelCase).hexdigest()) def lowercase ( )-> None: '''simple docstring''' import doctest doctest.testmod() a : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , ) parser.add_argument( "-f" , "--file" , dest="input_file" , help="Hash contents of a file" ) a : int = parser.parse_args() a : List[Any] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , "rb" ) as f: a : Tuple = f.read() else: a : Optional[int] = bytes(A_ , "utf-8" ) print(SHAaaa(A_ ).hash ) if __name__ == "__main__": main()
40
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
1
"""simple docstring""" from typing import List import numpy as np def lowercase ( A_ )-> int: '''simple docstring''' a : Union[str, Any] = {key: len(A_ ) for key, value in gen_kwargs.items() if isinstance(A_ , A_ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) a : str = max(lists_lengths.values() , default=0 ) return max(1 , A_ ) def lowercase ( A_ , A_ )-> List[range]: '''simple docstring''' a : Tuple = [] for group_idx in range(A_ ): a : Union[str, Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break a : str = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 a : Tuple = range(A_ , start + num_shards_to_add ) shards_indices_per_group.append(A_ ) return shards_indices_per_group def lowercase ( A_ , A_ )-> List[dict]: '''simple docstring''' a : Tuple = _number_of_shards_in_gen_kwargs(A_ ) if num_shards == 1: return [dict(A_ )] else: a : Tuple = _distribute_shards(num_shards=A_ , max_num_jobs=A_ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(A_ , A_ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(A_ ) ) ] def lowercase ( A_ )-> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , A_ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def lowercase ( A_ , A_ )-> dict: '''simple docstring''' a : Union[str, Any] 
= {len(A_ ) for value in gen_kwargs.values() if isinstance(A_ , A_ )} a : Optional[int] = {} for size in list_sizes: a : Optional[Any] = list(range(A_ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes a : Union[str, Any] = dict(A_ ) for key, value in shuffled_kwargs.items(): if isinstance(A_ , A_ ): a : int = [value[i] for i in indices_per_size[len(A_ )]] return shuffled_kwargs
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } a : Dict = UNetaDConditionModel(**__UpperCAmelCase) return model @property def __snake_case ( self : str): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __snake_case ( self : Union[str, Any]): torch.manual_seed(0) a : Dict = VQModel(**self.dummy_movq_kwargs) return model def __snake_case ( self : Optional[Any]): a : Optional[Any] = self.dummy_unet a : int = self.dummy_movq a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , ) a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0): a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to( __UpperCAmelCase) # create hint a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) if str(__UpperCAmelCase).startswith("mps"): a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase) else: a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) 
a : str = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __snake_case ( self : Dict): a : str = "cpu" a : Tuple = self.get_dummy_components() a : Dict = self.pipeline_class(**__UpperCAmelCase) a : Optional[int] = pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase)) a : Any = output.images a : Any = pipe( **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : Tuple = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[int]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : List[str]): a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy") a : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png") a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0 a : str = hint.permute(2 , 0 , 1).unsqueeze(0) a : Optional[int] = 
KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa) pipe_prior.to(__UpperCAmelCase) a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa) a : int = pipeline.to(__UpperCAmelCase) pipeline.set_progress_bar_config(disable=__UpperCAmelCase) a : Tuple = "A robot, 4k photo" a : Any = torch.Generator(device="cuda").manual_seed(0) a , a : int = pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() a : str = torch.Generator(device="cuda").manual_seed(0) a : Union[str, Any] = pipeline( image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) a : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
"""Convert Audio Spectrogram Transformer (AST) checkpoints from the original
repository (https://github.com/YuanGongND/ast) to the HF Transformers format.

NOTE(review): the qkv target-key reconstruction below follows the upstream
conversion script's naming (`...attention.attention.{query,key,value}`);
confirm against the installed `transformers` version before release.
"""
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """Build an ``ASTConfig`` matching *model_name* (patch strides, spectrogram
    length, and id<->label maps downloaded from the hub)."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # default strides
    elif "speech-commands" in model_name:
        config.max_length = 128  # 1-second clips -> 128 mel frames
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map an original AST state-dict key to its HF Transformers equivalent."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename every key of *orig_state_dict* for the HF model; fused qkv
    projections are split into separate query/key/value tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop the original classification heads; the HF model re-creates them."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original AST checkpoint, convert its weights, verify the
    logits on a sample clip, and optionally save/push the converted model."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1_024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")

    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
1
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __lowercase = """0.12""" # assumed parallelism: 8 if is_torch_available(): import torch def lowercase ( A_ , A_ , A_=None )-> List[Any]: '''simple docstring''' if rng is None: a : Union[str, Any] = random.Random() a : Tuple = 1 for dim in shape: total_dims *= dim a : Optional[Any] = [] for _ in range(A_ ): values.append(rng.randint(0 , vocab_size - 1 ) ) a : Dict = np.array(A_ , dtype=jnp.intaa ).reshape(A_ ) return output def lowercase ( A_ , A_=None )-> List[str]: '''simple docstring''' a : Optional[int] = ids_tensor(A_ , vocab_size=2 , rng=A_ ) # make sure that at least one token is attended to for each batch a : Tuple = 1 return attn_mask @require_flax class _A : """simple docstring""" UpperCAmelCase : Dict = None UpperCAmelCase : Dict = () def __snake_case ( self : Optional[int]): a , a : int = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 a : Dict = 2 a : Tuple = inputs["input_ids"].shape[-1] // 2 a : List[Any] = inputs["input_ids"][:max_batch_size, :sequence_length] a : str = jnp.ones_like(__UpperCAmelCase) a : str = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens a : Any = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` a : int = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def __snake_case ( self : Optional[Any]): a , a , a , a : Optional[int] = 
self._get_input_ids_and_config() a : Union[str, Any] = False a : str = max_length a : Dict = 0 for model_class in self.all_generative_model_classes: a : Union[str, Any] = model_class(__UpperCAmelCase) a : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning a : Optional[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase) a : int = pt_model_class(__UpperCAmelCase).eval() a : str = load_flax_weights_in_pytorch_model(__UpperCAmelCase , flax_model.params) a : Tuple = flax_model.generate(__UpperCAmelCase).sequences a : Tuple = pt_model.generate(torch.tensor(__UpperCAmelCase , dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: a : Tuple = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist()) def __snake_case ( self : Dict): a , a , a , a : Dict = self._get_input_ids_and_config() a : Optional[Any] = False a : Dict = max_length for model_class in self.all_generative_model_classes: a : Optional[int] = model_class(__UpperCAmelCase) a : List[str] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : str = jit(model.generate) a : List[str] = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Optional[int]): a , a , a , a : Dict = self._get_input_ids_and_config() a : Union[str, Any] = True a : List[Any] = max_length for model_class in self.all_generative_model_classes: a : Dict = model_class(__UpperCAmelCase) a : str = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Optional[int] = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Union[str, Any]): a , a , a , a 
: List[Any] = self._get_input_ids_and_config() a : List[str] = False a : str = max_length a : List[Any] = 2 for model_class in self.all_generative_model_classes: a : Tuple = model_class(__UpperCAmelCase) a : Union[str, Any] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : List[Any] = jit(model.generate) a : Optional[Any] = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : int): a , a , a , a : Any = self._get_input_ids_and_config() a : int = False a : str = max_length a : str = 2 a : Optional[int] = 2 for model_class in self.all_generative_model_classes: a : str = model_class(__UpperCAmelCase) a : str = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences) def __snake_case ( self : List[Any]): a , a , a , a : Optional[int] = self._get_input_ids_and_config() a : Any = True a : List[Any] = max_length a : Tuple = 0.8 a : int = 10 a : Union[str, Any] = 0.3 a : Any = 1 a : Optional[int] = 8 a : Any = 9 for model_class in self.all_generative_model_classes: a : int = model_class(__UpperCAmelCase) a : Any = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Optional[Any] = jit(model.generate) a : Any = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Optional[int]): a , a , a , a : str = self._get_input_ids_and_config() a : Tuple = max_length a : int = 1 a : int = 8 a : List[Any] = 9 for model_class in self.all_generative_model_classes: a : List[Any] = model_class(__UpperCAmelCase) a : Optional[Any] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Any = jit(model.generate) a : List[str] = 
jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Dict): a , a , a , a : int = self._get_input_ids_and_config() a : Optional[Any] = max_length a : Dict = 2 a : Tuple = 1 a : Optional[Any] = 8 a : Dict = 9 for model_class in self.all_generative_model_classes: a : Any = model_class(__UpperCAmelCase) a : List[str] = model.generate(__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Dict = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Optional[int]): a , a , a , a : List[Any] = self._get_input_ids_and_config() # pad attention mask on the left a : Optional[int] = attention_mask.at[(0, 0)].set(0) a : List[str] = False a : str = max_length for model_class in self.all_generative_model_classes: a : Any = model_class(__UpperCAmelCase) a : Any = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : List[Any] = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : List[Any]): a , a , a , a : List[Any] = self._get_input_ids_and_config() # pad attention mask on the left a : Tuple = attention_mask.at[(0, 0)].set(0) a : Any = True a : Union[str, Any] = max_length for model_class in self.all_generative_model_classes: a : Dict = model_class(__UpperCAmelCase) a : List[Any] = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : Dict = jit(model.generate) a : Dict = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences 
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) def __snake_case ( self : Dict): a , a , a , a : Any = self._get_input_ids_and_config() # pad attention mask on the left a : Dict = attention_mask.at[(0, 0)].set(0) a : str = 2 a : Optional[int] = max_length for model_class in self.all_generative_model_classes: a : List[str] = model_class(__UpperCAmelCase) a : int = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase) a : str = jit(model.generate) a : Tuple = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist()) @require_flax class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") a : int = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") a : Union[str, Any] = "Hello world" a : str = tokenizer(__UpperCAmelCase , return_tensors="np").input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__UpperCAmelCase , "do_samples"): model.generate(__UpperCAmelCase , do_samples=__UpperCAmelCase) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__UpperCAmelCase , "foo"): a : Tuple = {"foo": "bar"} model.generate(__UpperCAmelCase , **__UpperCAmelCase)
40
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# BUG FIX: the original bound this dict (and the per-backend name lists) to
# ``__lowercase`` while the final ``_LazyModule`` call consumed
# ``_import_structure`` -- a NameError at import time.  The dict built here is
# now the one actually handed to ``_LazyModule``.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch backend missing: simply skip registering the torch models.
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow backend missing: skip registering the TF models.
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy below
    # resolves attributes on demand instead.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # BUG FIX: the lazy module proxy was assigned to a throwaway name; it must
    # replace this module in sys.modules for lazy attribute resolution to work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
1
"""simple docstring"""


def lowercase(A_) -> str:
    """Return the binary representation of an integer as a ``0b``-prefixed string.

    Negative numbers are rendered with a leading ``-`` (e.g. ``-0b101``).

    Raises:
        TypeError: if ``A_`` is a float or a string.

    >>> lowercase(0)
    '0b0'
    >>> lowercase(5)
    '0b101'
    >>> lowercase(-5)
    '-0b101'
    """
    # BUG FIX: the original tested ``isinstance(A_, A_)`` -- the value against
    # itself -- which raises "isinstance() arg 2 must be a type" for every
    # input.  The intended checks are against ``float`` and ``str``.
    if isinstance(A_, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(A_, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if A_ == 0:
        return "0b0"
    negative = False
    # BUG FIX: the body referenced an undefined ``num``; bind it to the parameter.
    num = A_
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Insert at the front so the most significant bit ends up first.
        binary.insert(0, num % 2)
        num >>= 1
    # BUG FIX: the original joined ``str(A_)`` (the whole input) for every
    # digit instead of each digit ``str(e)``.
    digits = "".join(str(e) for e in binary)
    if negative:
        return "-0b" + digits
    return "0b" + digits


if __name__ == "__main__":
    import doctest

    doctest.testmod()
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _A ( _a ,_a ,_a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = StableDiffusionInpaintPipeline UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase : Union[str, Any] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase : int = frozenset([] ) def __snake_case ( self : Dict): torch.manual_seed(0) a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , ) a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase) torch.manual_seed(0) a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0) a : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , 
intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a : Any = CLIPTextModel(__UpperCAmelCase) a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") a : Optional[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase) a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0] a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64)) a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64)) if str(__UpperCAmelCase).startswith("mps"): a : Tuple = torch.manual_seed(__UpperCAmelCase) else: a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase) a : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def __snake_case ( self : List[str]): a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a : Tuple = self.get_dummy_components() a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase) a : int = sd_pipe.to(__UpperCAmelCase) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase) a : Any = self.get_dummy_inputs(__UpperCAmelCase) a : Optional[int] = sd_pipe(**__UpperCAmelCase).images a : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : int = 
np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def __snake_case ( self : str): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Union[str, Any]): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case ( self : Dict): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy") a : Tuple = "stabilityai/stable-diffusion-2-inpainting" a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Any = "Face of a yellow cat, high resolution, sitting on a park bench" a : str = torch.manual_seed(0) a : Union[str, Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : List[str] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def __snake_case ( self : Any): a : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Any = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Dict = torch.manual_seed(0) a : List[Any] = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , ) a : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def __snake_case ( self : int): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png") a : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png") a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting" a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler") a : int = StableDiffusionInpaintPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase) pipe.set_progress_bar_config(disable=__UpperCAmelCase) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench" a : Optional[int] = torch.manual_seed(0) a : str = pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , 
num_inference_steps=2 , output_type="np" , ) a : int = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
40
1
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : List[str]=7 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[Any]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : List[Any]=5 , __UpperCAmelCase : Optional[int]=4 , __UpperCAmelCase : List[str]=37 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : List[str]=512 , __UpperCAmelCase : List[str]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : Dict=4 , ): a : Any = parent a : Any = batch_size a : List[str] = seq_length a : Optional[Any] = is_training a : int = use_attention_mask a : Optional[Any] = use_token_type_ids a : Any = use_labels a : Any = vocab_size a : Any = hidden_size a : Any = num_hidden_layers a : Dict = num_attention_heads a : str = intermediate_size a : Any = hidden_act a : str = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : Tuple = max_position_embeddings a : Dict = type_vocab_size a : Dict = type_sequence_label_size a : List[str] = initializer_range a : List[Any] = num_choices def __snake_case ( self : str): a : Optional[int] = 
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) a : Any = None if self.use_attention_mask: a : Any = random_attention_mask([self.batch_size, self.seq_length]) a : Union[str, Any] = None if self.use_token_type_ids: a : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) a : Union[str, Any] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __snake_case ( self : Optional[int]): a : Any = self.prepare_config_and_inputs() a , a , a , a : str = config_and_inputs a : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Union[str, Any] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def __snake_case ( self : str): a : List[Any] = FlaxAlbertModelTester(self) @slow def __snake_case ( self : Any): for model_class_name in self.all_model_classes: a : Dict = model_class_name.from_pretrained("albert-base-v2") a : List[str] = model(np.ones((1, 1))) self.assertIsNotNone(__UpperCAmelCase) @require_flax class _A ( unittest.TestCase ): """simple docstring""" @slow def __snake_case ( self : int): a : Optional[Any] = 
FlaxAlbertModel.from_pretrained("albert-base-v2") a : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) a : Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0] a : Optional[Any] = (1, 11, 768) self.assertEqual(output.shape , __UpperCAmelCase) a : Optional[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]]) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1e-4))
40
"""simple docstring"""


def lowercase(A_) -> bool:
    """Return True if the string is an isogram (no letter repeats, case-insensitively).

    Raises:
        ValueError: if the string contains non-alphabetic characters.
    """
    # BUG FIX: the original iterated an undefined name ``string``; the
    # parameter is ``A_``.
    if not all(x.isalpha() for x in A_):
        raise ValueError("String must only contain alphabetic characters.")
    # BUG FIX: the original compared ``len(set(A_))`` (case-sensitive) against
    # the length of the lowered string, so mixed-case repeats like "Aa" were
    # wrongly accepted.  Both sides now use the case-folded letters.
    lowered = sorted(A_.lower())
    return len(lowered) == len(set(lowered))


if __name__ == "__main__":
    # BUG FIX: the original called undefined names ``is_isogram`` and
    # ``input_str``; wire the prompt, the check and the report together.
    input_str = input("Enter a string ").strip()
    isogram = lowercase(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
40
1
"""simple docstring""" import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __lowercase = 50003 __lowercase = 50002 @require_sentencepiece @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : int = PLBartTokenizer UpperCAmelCase : List[Any] = None UpperCAmelCase : str = False def __snake_case ( self : int): super().setUp() # We have a SentencePiece fixture for testing a : List[str] = PLBartTokenizer(__UpperCAmelCase , language_codes="base" , keep_accents=__UpperCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def __snake_case ( self : List[str]): a : List[Any] = PLBartTokenizer(__UpperCAmelCase , language_codes="base" , keep_accents=__UpperCAmelCase) a : Tuple = tokenizer.tokenize("This is a test") self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a : Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 
55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a : Union[str, Any] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) a : List[str] = tokenizer.vocab_size a : Union[str, Any] = [tokenizer.convert_ids_to_tokens(__UpperCAmelCase) for x in range(end - 4 , __UpperCAmelCase)] self.assertListEqual(__UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"]) a : Union[str, Any] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" a : Tuple = tokenizer(__UpperCAmelCase).input_ids self.assertEqual( tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase) , __UpperCAmelCase , ) def __snake_case ( self : int): a : Any = PLBartTokenizer(__UpperCAmelCase , language_codes="multi" , keep_accents=__UpperCAmelCase) a : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a : Optional[int] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ value + 
tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a : Dict = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) a : Any = tokenizer.vocab_size a : Tuple = [tokenizer.convert_ids_to_tokens(__UpperCAmelCase) for x in range(end - 7 , __UpperCAmelCase)] self.assertListEqual( __UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]) a : Dict = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" a : Optional[Any] = tokenizer(__UpperCAmelCase).input_ids self.assertEqual( tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase) , __UpperCAmelCase , ) @require_torch @require_sentencepiece @require_tokenizers class _A ( unittest.TestCase ): """simple docstring""" UpperCAmelCase : int = """uclanlp/plbart-python-en_XX""" UpperCAmelCase : Dict = [ """def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""", """def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""", ] UpperCAmelCase : Optional[Any] = [ """Returns the maximum value of a b c.""", """Sums the values of a b c.""", ] UpperCAmelCase : Dict = [ 1_3_4, 5_4_5_2, 3_3_4_6_0, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 9_8_8, 2_0, 3_3_4_5_6, 1_9, 3_3_4_5_6, 7_7_1, 3_9, 4_2_5_8, 8_8_9, 3_3_1_8, 3_3_4_4_1, 3_3_4_6_3, 3_3_4_6_5, 3_3_4_6_3, 3_3_4_4_9, 2_4_7_1, 2, PYTHON_CODE, ] @classmethod def __snake_case ( cls : Optional[Any]): a : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX") a : 
Union[str, Any] = 1 return cls def __snake_case ( self : Any): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003) def __snake_case ( self : List[Any]): a : Any = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase) def __snake_case ( self : Tuple): self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids) a : Tuple = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] a : str = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase) a : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase) def __snake_case ( self : Any): a : Dict = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0] , __UpperCAmelCase) a : List[str] = 10 a : Union[str, Any] = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , __UpperCAmelCase) self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : int): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]) , [50004, 50001]) def __snake_case ( self : Optional[int]): a : Union[str, Any] = tempfile.mkdtemp() a : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__UpperCAmelCase) a : Optional[int] = PLBartTokenizer.from_pretrained(__UpperCAmelCase) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase) @require_torch def __snake_case ( self : Tuple): a : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt") 
a : Optional[int] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE]) self.assertEqual(batch.decoder_input_ids[1][0] , __UpperCAmelCase) self.assertEqual(batch.decoder_input_ids[1][-1] , 2) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE]) @require_torch def __snake_case ( self : Optional[Any]): a : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) a : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase) self.assertEqual((2, 26) , batch.input_ids.shape) self.assertEqual((2, 26) , batch.attention_mask.shape) a : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE]) def __snake_case ( self : str): a : Tuple = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="pt") a : Optional[Any] = self.tokenizer( text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="pt") a : Dict = targets["input_ids"] a : str = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def __snake_case ( self : Dict): a : Optional[int] = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java") 
self.assertEqual( nested_simplify(__UpperCAmelCase) , { # A, test, EOS, en_XX "input_ids": [[150, 242, 2, 50003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50001, } , )
40
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


# BUG FIX: the original defined two classes both named ``_A`` (the second
# shadowed the first) and referenced an undefined ``ParquetConfig`` in the
# builder body (NameError at import).  Field names are restored from their
# consumption sites (``self.config.batch_size`` / ``columns`` / ``features``).
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Parquet loader."""

    # Number of rows pulled per Arrow record batch.
    batch_size: int = 10_000
    # Optional column projection pushed down to pyarrow.
    columns: Optional[List[str]] = None
    # Optional explicit features; otherwise inferred from the Arrow schema.
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict ``data_files`` and build one split per key."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema.
            # BUG FIX: the inferred features were bound to a throwaway local and
            # discarded; they must be stored on ``self.info``.
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a raw table to the declared features, if any."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(key, pa.Table)`` pairs, reading each file in record batches."""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # A column projection must match the declared features exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
40
1
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class _A ( _a ): """simple docstring""" UpperCAmelCase : int = """dpr""" def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ): super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase) a : List[Any] = vocab_size a : Optional[Any] = hidden_size a : Union[str, Any] = num_hidden_layers a : Dict = num_attention_heads a : int = hidden_act a : Any = intermediate_size a : Any = hidden_dropout_prob a : Dict = attention_probs_dropout_prob a 
: Any = max_position_embeddings a : Union[str, Any] = type_vocab_size a : Optional[Any] = initializer_range a : Dict = layer_norm_eps a : int = projection_dim a : str = position_embedding_type
40
1
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str): a : Optional[int] = 3 a : List[Any] = 250 a : Optional[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase) a : List[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float) / length return input_ids, scores def __snake_case ( self : Union[str, Any]): a , a : List[Any] = self._get_tensors(5) a : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1), ]) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a , a : Any = self._get_tensors(9) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a , a : Any = self._get_tensors(10) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase)) def __snake_case ( self : Union[str, Any]): a : Any = MaxLengthCriteria(max_length=10) a , a : Optional[Any] = self._get_tensors(5) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a , a : List[Any] = self._get_tensors(9) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a , a : List[Any] = self._get_tensors(10) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase)) def __snake_case ( self : Optional[Any]): a : str = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5) a , a : str = self._get_tensors(5) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a , a : str = self._get_tensors(9) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a , a : Dict = self._get_tensors(10) 
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase)) a : Dict = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length , 10) def __snake_case ( self : str): a , a : Dict = self._get_tensors(5) a : List[str] = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase)) a : Dict = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase)) def __snake_case ( self : Union[str, Any]): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 10) with self.assertWarns(__UpperCAmelCase): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]) , 11) a : Tuple = validate_stopping_criteria(StoppingCriteriaList() , 11) self.assertEqual(len(__UpperCAmelCase) , 1)
40
"""simple docstring""" class _A : """simple docstring""" def __init__( self : int , __UpperCAmelCase : int): a : Tuple = size a : Dict = [0] * size a : Optional[int] = [0] * size @staticmethod def __snake_case ( __UpperCAmelCase : int): return index | (index + 1) @staticmethod def __snake_case ( __UpperCAmelCase : int): return (index & (index + 1)) - 1 def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int): a : Union[str, Any] = value while index < self.size: a : Dict = self.get_prev(__UpperCAmelCase) + 1 if current_left_border == index: a : Optional[int] = value else: a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = self.get_next(__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int): right -= 1 # Because of right is exclusive a : List[str] = 0 while left <= right: a : Dict = self.get_prev(__UpperCAmelCase) if left <= current_left: a : Optional[int] = max(__UpperCAmelCase , self.tree[right]) a : Optional[Any] = current_left else: a : List[str] = max(__UpperCAmelCase , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" def lowercase ( A_ = 1_000_000 )-> int: '''simple docstring''' a : Tuple = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , A_ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
40
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
1
"""simple docstring""" import os import string import sys __lowercase = 1 << 8 __lowercase = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } __lowercase = KEYMAP["""up"""] __lowercase = KEYMAP["""left"""] if sys.platform == "win32": __lowercase = [] __lowercase = { b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): __lowercase = ord(str(i)) def lowercase ( )-> List[str]: '''simple docstring''' if os.name == "nt": import msvcrt a : Tuple = "mbcs" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(A_ ) == 0: # Read the keystroke a : Any = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): a : Union[str, Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: a : Tuple = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) ) WIN_CH_BUFFER.append(A_ ) if ord(A_ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) a : Dict = chr(KEYMAP["esc"] ) except KeyError: a : Dict = cha[1] else: a : Any = ch.decode(A_ ) else: a : List[Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty a : str = sys.stdin.fileno() a : Tuple = termios.tcgetattr(A_ ) try: tty.setraw(A_ ) a : List[Any] = 
sys.stdin.read(1 ) finally: termios.tcsetattr(A_ , termios.TCSADRAIN , A_ ) return ch def lowercase ( )-> Any: '''simple docstring''' a : Dict = get_raw_chars() if ord(A_ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(A_ ) == KEYMAP["esc"]: a : Optional[Any] = get_raw_chars() if ord(A_ ) == KEYMAP["mod_int"]: a : Any = get_raw_chars() if ord(A_ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(A_ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(A_ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
40
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = LayoutLMTokenizer UpperCAmelCase : int = LayoutLMTokenizerFast UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : Optional[Any] = True def __snake_case ( self : Optional[int]): super().setUp() a : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str): a : Tuple = "UNwant\u00E9d,running" a : Dict = "unwanted, running" return input_text, output_text def __snake_case ( self : Any): a : List[Any] = self.tokenizer_class(self.vocab_file) a : str = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9]) def __snake_case ( self : Dict): pass
40
1
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig __lowercase = logging.get_logger(__name__) # General docstring __lowercase = """ResNetConfig""" # Base docstring __lowercase = """microsoft/resnet-50""" __lowercase = [1, 2048, 7, 7] # Image classification docstring __lowercase = """microsoft/resnet-50""" __lowercase = """tiger cat""" __lowercase = [ """microsoft/resnet-50""", # See all resnet models at https://huggingface.co/models?filter=resnet ] class _A ( nn.Module ): """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu"): super().__init__() a : List[Any] = nn.Convad( __UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=kernel_size // 2 , bias=__UpperCAmelCase) a : str = nn.BatchNormad(__UpperCAmelCase) a : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Tensor): a : Union[str, Any] = self.convolution(__UpperCAmelCase) a : Optional[Any] = self.normalization(__UpperCAmelCase) a : Optional[int] = self.activation(__UpperCAmelCase) return hidden_state class _A ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , __UpperCAmelCase : ResNetConfig): 
super().__init__() a : str = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act) a : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1) a : str = config.num_channels def __snake_case ( self : str , __UpperCAmelCase : Tensor): a : Optional[int] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration.") a : Optional[int] = self.embedder(__UpperCAmelCase) a : Optional[int] = self.pooler(__UpperCAmelCase) return embedding class _A ( nn.Module ): """simple docstring""" def __init__( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2): super().__init__() a : Union[str, Any] = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , stride=__UpperCAmelCase , bias=__UpperCAmelCase) a : int = nn.BatchNormad(__UpperCAmelCase) def __snake_case ( self : Any , __UpperCAmelCase : Tensor): a : Any = self.convolution(__UpperCAmelCase) a : Any = self.normalization(__UpperCAmelCase) return hidden_state class _A ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu"): super().__init__() a : Optional[int] = in_channels != out_channels or stride != 1 a : List[str] = ( ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) if should_apply_shortcut else nn.Identity() ) a : str = nn.Sequential( ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , activation=__UpperCAmelCase) , ) a : Optional[int] = ACTaFN[activation] def __snake_case ( self : str , __UpperCAmelCase : Any): a : Optional[int] = hidden_state a : Optional[int] = self.layer(__UpperCAmelCase) a : Optional[Any] = self.shortcut(__UpperCAmelCase) 
hidden_state += residual a : str = self.activation(__UpperCAmelCase) return hidden_state class _A ( nn.Module ): """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu" , __UpperCAmelCase : int = 4): super().__init__() a : Optional[Any] = in_channels != out_channels or stride != 1 a : List[str] = out_channels // reduction a : Optional[Any] = ( ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) if should_apply_shortcut else nn.Identity() ) a : int = nn.Sequential( ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=__UpperCAmelCase) , ) a : Union[str, Any] = ACTaFN[activation] def __snake_case ( self : List[str] , __UpperCAmelCase : Dict): a : List[str] = hidden_state a : List[Any] = self.layer(__UpperCAmelCase) a : Optional[int] = self.shortcut(__UpperCAmelCase) hidden_state += residual a : str = self.activation(__UpperCAmelCase) return hidden_state class _A ( nn.Module ): """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : ResNetConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ): super().__init__() a : Tuple = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer a : Dict = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , activation=config.hidden_act) , *[layer(__UpperCAmelCase , __UpperCAmelCase , activation=config.hidden_act) for _ in range(depth - 1)] , ) def __snake_case ( self : List[str] , __UpperCAmelCase : Tensor): a : str = input for layer in self.layers: a : Tuple = layer(__UpperCAmelCase) return hidden_state class _A ( nn.Module ): 
"""simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : ResNetConfig): super().__init__() a : List[str] = nn.ModuleList([]) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( __UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , )) a : Any = zip(config.hidden_sizes , config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(__UpperCAmelCase , config.depths[1:]): self.stages.append(ResNetStage(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , depth=__UpperCAmelCase)) def __snake_case ( self : int , __UpperCAmelCase : Tensor , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True): a : Tuple = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a : Any = hidden_states + (hidden_state,) a : int = stage_module(__UpperCAmelCase) if output_hidden_states: a : List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase , ) class _A ( _a ): """simple docstring""" UpperCAmelCase : int = ResNetConfig UpperCAmelCase : List[Any] = """resnet""" UpperCAmelCase : Optional[Any] = """pixel_values""" UpperCAmelCase : str = True def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Tuple): if isinstance(__UpperCAmelCase , nn.Convad): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu") elif isinstance(__UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm)): nn.init.constant_(module.weight , 1) nn.init.constant_(module.bias , 0) def __snake_case ( self : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Any=False): if isinstance(__UpperCAmelCase , __UpperCAmelCase): a : Tuple = value 
__lowercase = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ __lowercase = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( """The bare ResNet model outputting raw features without any specific head on top.""" ,_a ,) class _A ( _a ): """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : Union[str, Any]): super().__init__(__UpperCAmelCase) a : Tuple = config a : str = ResNetEmbeddings(__UpperCAmelCase) a : List[str] = ResNetEncoder(__UpperCAmelCase) a : Dict = nn.AdaptiveAvgPoolad((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __snake_case ( self : List[str] , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None): a : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a : List[str] = return_dict if return_dict is not None else self.config.use_return_dict a : Any = self.embedder(__UpperCAmelCase) a : Optional[int] = self.encoder( __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase) a : Tuple = encoder_outputs[0] a : Dict = self.pooler(__UpperCAmelCase) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( """ ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" ,_a ,) class _A ( _a ): """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : List[str]): super().__init__(__UpperCAmelCase) a : List[str] = config.num_labels a : Union[str, Any] = ResNetModel(__UpperCAmelCase) # classification head a : Tuple = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ): a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict a : int = self.resnet(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase) a : Dict = outputs.pooler_output if return_dict else outputs[1] a : List[str] = self.classifier(__UpperCAmelCase) a : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: a : Dict = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): a : int = "single_label_classification" else: a : Tuple = "multi_label_classification" if self.config.problem_type == "regression": a : Dict = MSELoss() if self.num_labels == 1: a : Any = loss_fct(logits.squeeze() , labels.squeeze()) else: a : Any = loss_fct(__UpperCAmelCase , __UpperCAmelCase) elif self.config.problem_type == "single_label_classification": a : Any = CrossEntropyLoss() a : Optional[int] = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1)) elif self.config.problem_type == 
"multi_label_classification": a : Optional[Any] = BCEWithLogitsLoss() a : Optional[int] = loss_fct(__UpperCAmelCase , __UpperCAmelCase) if not return_dict: a : int = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states) @add_start_docstrings( """ ResNet backbone, to be used with frameworks like DETR and MaskFormer. """ ,_a ,) class _A ( _a ,_a ): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict): super().__init__(__UpperCAmelCase) super()._init_backbone(__UpperCAmelCase) a : List[str] = [config.embedding_size] + config.hidden_sizes a : Tuple = ResNetEmbeddings(__UpperCAmelCase) a : List[str] = ResNetEncoder(__UpperCAmelCase) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase) @replace_return_docstrings(output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC) def __snake_case ( self : Dict , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None): a : Tuple = return_dict if return_dict is not None else self.config.use_return_dict a : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a : str = self.embedder(__UpperCAmelCase) a : Union[str, Any] = self.encoder(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase) a : Optional[int] = outputs.hidden_states a : Union[str, Any] = () for idx, stage in enumerate(self.stage_names): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: a : str = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=__UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , 
attentions=__UpperCAmelCase , )
40
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __lowercase = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __lowercase = [file for file in filepaths if file != file.lower()] if upper_files: print(f'''{len(upper_files)} files contain uppercase characters:''') print("""\n""".join(upper_files) + """\n""") __lowercase = [file for file in filepaths if """ """ in file] if space_files: print(f'''{len(space_files)} files contain space characters:''') print("""\n""".join(space_files) + """\n""") __lowercase = [file for file in filepaths if """-""" in file] if hyphen_files: print(f'''{len(hyphen_files)} files contain hyphen characters:''') print("""\n""".join(hyphen_files) + """\n""") __lowercase = [file for file in filepaths if os.sep not in file] if nodir_files: print(f'''{len(nodir_files)} files are not in a directory:''') print("""\n""".join(nodir_files) + """\n""") __lowercase = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
40
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def lowercase ( A_ , A_ , A_ = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(A_ ), magnitude * sin(A_ )] return [magnitude * cos(radians(A_ ) ), magnitude * sin(radians(A_ ) )] def lowercase ( A_ , A_ , A_ = 10**-1 )-> bool: '''simple docstring''' a : NDArray[floataa] = cross(A_ , A_ ) a : float = sum(A_ ) return abs(A_ ) < eps if __name__ == "__main__": # Test to check if it works __lowercase = array( [ polar_force(7_18.4, 180 - 30), polar_force(8_79.54, 45), polar_force(100, -90), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg __lowercase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) __lowercase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
40
1
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) set_seed(770) __lowercase = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } __lowercase = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } __lowercase = os.path.dirname(os.path.abspath(__file__)) __lowercase = os.path.join(os.path.expanduser("""~"""), """.cache""") __lowercase = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def lowercase ( A_ , A_=False )-> Dict: '''simple docstring''' a : str = model_type if use_small: key += 
"_small" return os.path.join(A_ , REMOTE_MODEL_PATHS[key]["file_name"] ) def lowercase ( A_ , A_ )-> Any: '''simple docstring''' os.makedirs(A_ , exist_ok=A_ ) hf_hub_download(repo_id=A_ , filename=A_ , local_dir=A_ ) def lowercase ( A_ , A_ , A_=False , A_="text" )-> int: '''simple docstring''' if model_type == "text": a : Any = BarkSemanticModel a : List[Any] = BarkSemanticConfig a : List[Any] = BarkSemanticGenerationConfig elif model_type == "coarse": a : Optional[Any] = BarkCoarseModel a : Tuple = BarkCoarseConfig a : List[str] = BarkCoarseGenerationConfig elif model_type == "fine": a : Optional[Any] = BarkFineModel a : Dict = BarkFineConfig a : List[str] = BarkFineGenerationConfig else: raise NotImplementedError() a : Dict = F'''{model_type}_small''' if use_small else model_type a : Any = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(A_ ): logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' ) _download(model_info["repo_id"] , model_info["file_name"] ) a : Optional[int] = torch.load(A_ , map_location=A_ ) # this is a hack a : List[str] = checkpoint["model_args"] if "input_vocab_size" not in model_args: a : str = model_args["vocab_size"] a : Union[str, Any] = model_args["vocab_size"] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments a : Tuple = model_args.pop("n_head" ) a : Tuple = model_args.pop("n_embd" ) a : int = model_args.pop("n_layer" ) a : List[Any] = ConfigClass(**checkpoint["model_args"] ) a : Tuple = ModelClass(config=A_ ) a : int = GenerationConfigClass() a : Optional[int] = model_generation_config a : Any = checkpoint["model"] # fixup checkpoint a : Optional[Any] = "_orig_mod." 
for k, v in list(state_dict.items() ): if k.startswith(A_ ): # replace part of the key with corresponding layer name in HF implementation a : Tuple = k[len(A_ ) :] for old_layer_name in new_layer_name_dict: a : Tuple = new_k.replace(A_ , new_layer_name_dict[old_layer_name] ) a : Tuple = state_dict.pop(A_ ) a : Dict = set(state_dict.keys() ) - set(model.state_dict().keys() ) a : Any = {k for k in extra_keys if not k.endswith(".attn.bias" )} a : Any = set(model.state_dict().keys() ) - set(state_dict.keys() ) a : Optional[Any] = {k for k in missing_keys if not k.endswith(".attn.bias" )} if len(A_ ) != 0: raise ValueError(F'''extra keys found: {extra_keys}''' ) if len(A_ ) != 0: raise ValueError(F'''missing keys: {missing_keys}''' ) model.load_state_dict(A_ , strict=A_ ) a : List[Any] = model.num_parameters(exclude_embeddings=A_ ) a : int = checkpoint["best_val_loss"].item() logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(A_ , 3 )} loss''' ) model.eval() model.to(A_ ) del checkpoint, state_dict return model def lowercase ( A_ , A_=False , A_="text" )-> Dict: '''simple docstring''' if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() a : Dict = "cpu" # do conversion on cpu a : Optional[Any] = _get_ckpt_path(A_ , use_small=A_ ) a : Optional[int] = _load_model(A_ , A_ , model_type=A_ , use_small=A_ ) # load bark initial model a : Optional[Any] = _bark_load_model(A_ , "cpu" , model_type=A_ , use_small=A_ ) if model_type == "text": a : Dict = bark_model["model"] if model.num_parameters(exclude_embeddings=A_ ) != bark_model.get_num_params(): raise ValueError("initial and new models don't have the same number of parameters" ) # check if same output as the bark model a : Dict = 5 a : List[Any] = 10 if model_type in ["text", "coarse"]: a : List[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) a : Optional[Any] = bark_model(A_ )[0] a : str = model(A_ ) # take last logits a : Tuple = 
output_new_model_total.logits[:, [-1], :] else: a : int = 3 a : Any = 8 a : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) a : Union[str, Any] = model(A_ , A_ ) a : str = bark_model(A_ , A_ ) a : Optional[int] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("initial and new outputs don't have the same shape" ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError("initial and new outputs are not equal" ) Path(A_ ).mkdir(exist_ok=A_ ) model.save_pretrained(A_ ) def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , )-> Any: '''simple docstring''' a : Tuple = os.path.join(A_ , A_ ) a : int = BarkSemanticConfig.from_pretrained(os.path.join(A_ , "config.json" ) ) a : Tuple = BarkCoarseConfig.from_pretrained(os.path.join(A_ , "config.json" ) ) a : Tuple = BarkFineConfig.from_pretrained(os.path.join(A_ , "config.json" ) ) a : List[str] = EncodecConfig.from_pretrained("facebook/encodec_24khz" ) a : Tuple = BarkSemanticModel.from_pretrained(A_ ) a : Tuple = BarkCoarseModel.from_pretrained(A_ ) a : Any = BarkFineModel.from_pretrained(A_ ) a : Tuple = EncodecModel.from_pretrained("facebook/encodec_24khz" ) a : List[str] = BarkConfig.from_sub_model_configs( A_ , A_ , A_ , A_ ) a : str = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) a : Optional[int] = BarkModel(A_ ) a : Tuple = semantic a : int = coarseAcoustic a : List[str] = fineAcoustic a : Dict = codec a : str = bark_generation_config Path(A_ ).mkdir(exist_ok=A_ ) bark.save_pretrained(A_ , repo_id=A_ , push_to_hub=A_ ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") 
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") __lowercase = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
40
"""simple docstring""" def lowercase ( A_ , A_ )-> float: '''simple docstring''' if mass < 0: raise ValueError("The mass of a body cannot be negative" ) return 0.5 * mass * abs(A_ ) * abs(A_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
40
1
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) __lowercase = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Tuple: '''simple docstring''' for attribute in key.split("." ): a : Union[str, Any] = getattr(A_ , A_ ) if weight_type is not None: a : List[str] = getattr(A_ , A_ ).shape else: a : List[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": a : Dict = value elif weight_type == "weight_g": a : Tuple = value elif weight_type == "weight_v": a : Union[str, Any] = value elif weight_type == "bias": a : List[Any] = value else: a : Optional[Any] = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase ( A_ , A_ , A_ )-> Tuple: '''simple docstring''' a : Tuple = [] a : Any = fairseq_model.state_dict() a : Optional[int] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): a : Optional[int] = False if "conv_layers" in name: load_conv_layer( A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == "group" , ) a : List[Any] = True else: for key, mapped_key in MAPPING.items(): a : Tuple = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: a : List[str] = True if "*" in mapped_key: a : Tuple = name.split(A_ )[0].split("." )[-2] a : Optional[Any] = mapped_key.replace("*" , A_ ) if "weight_g" in name: a : List[Any] = "weight_g" elif "weight_v" in name: a : Any = "weight_v" elif "weight" in name: a : int = "weight" elif "bias" in name: a : Optional[int] = "bias" else: a : Tuple = None set_recursively(A_ , A_ , A_ , A_ , A_ ) continue if not is_used: unused_weights.append(A_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> List[str]: '''simple docstring''' a : List[str] = full_name.split("conv_layers." )[-1] a : List[str] = name.split("." 
) a : Optional[int] = int(items[0] ) a : int = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) a : List[str] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) a : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) a : Union[str, Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) a : List[str] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(A_ ) def lowercase ( A_ , A_ )-> int: '''simple docstring''' a : str = SEWConfig() if is_finetuned: a : int = model.wav_encoder.wav_model.cfg else: a : Optional[Any] = model.cfg a : Optional[int] = fs_config.conv_bias a : str = eval(fs_config.conv_feature_layers ) a : Optional[Any] = [x[0] for x in conv_layers] a : Optional[int] = [x[1] for x in conv_layers] a : List[str] = [x[2] for x in conv_layers] a : Dict = "gelu" a : Union[str, Any] = "layer" if fs_config.extractor_mode == "layer_norm" else "group" a : List[str] = 0.0 a : Tuple = fs_config.activation_fn.name a : Dict = fs_config.encoder_embed_dim a : int = 0.0_2 a : Dict = fs_config.encoder_ffn_embed_dim a : Optional[int] = 1e-5 a : Tuple = fs_config.encoder_layerdrop a : Dict = fs_config.encoder_attention_heads a : int = fs_config.conv_pos_groups a : Tuple = fs_config.conv_pos a : int = len(A_ ) a : Union[str, Any] = fs_config.encoder_layers a : int = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: a : Any = model.cfg a : List[Any] = fs_config.final_dropout a : Optional[int] = fs_config.layerdrop a : List[Any] = fs_config.activation_dropout a : List[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 a : int = fs_config.attention_dropout a : Optional[int] = fs_config.dropout_input a : str = fs_config.dropout a : int = fs_config.mask_channel_length a : Tuple = fs_config.mask_channel_prob a : Tuple = 
fs_config.mask_length a : str = fs_config.mask_prob a : Union[str, Any] = "Wav2Vec2FeatureExtractor" a : Any = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def lowercase ( A_ , A_ , A_=None , A_=None , A_=True )-> List[Any]: '''simple docstring''' if is_finetuned: a , a , a : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: a , a , a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: a : str = SEWConfig.from_pretrained(A_ ) else: a : int = convert_config(model[0] , A_ ) a : List[Any] = model[0].eval() a : int = True if config.feat_extract_norm == "layer" else False a : Tuple = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , ) if is_finetuned: if dict_path: a : Optional[Any] = Dictionary.load(A_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a : Union[str, Any] = target_dict.pad_index a : Optional[Any] = target_dict.bos_index a : Optional[int] = target_dict.pad_index a : List[str] = target_dict.bos_index a : int = target_dict.eos_index a : Optional[Any] = len(target_dict.symbols ) a : Optional[int] = os.path.join(A_ , "vocab.json" ) if not os.path.isdir(A_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(A_ ) ) return os.makedirs(A_ , exist_ok=A_ ) with open(A_ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , A_ ) a : Optional[Any] = WavaVecaCTCTokenizer( A_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=A_ , ) a : Optional[Any] = WavaVecaProcessor(feature_extractor=A_ , tokenizer=A_ ) processor.save_pretrained(A_ ) a : Optional[Any] = SEWForCTC(A_ ) else: a : Union[str, Any] = SEWModel(A_ 
) feature_extractor.save_pretrained(A_ ) recursively_load_weights(A_ , A_ , A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __lowercase = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
40
"""simple docstring""" import os import sys import unittest __lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowercase = os.path.join(git_repo_path, """src""", """diffusers""") class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : Any): a : List[Any] = find_backend(" if not is_torch_available():") self.assertEqual(__UpperCAmelCase , "torch") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") a : int = find_backend( " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):") self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx") def __snake_case ( self : Union[str, Any]): a : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("torch" , __UpperCAmelCase) self.assertIn("torch_and_transformers" , __UpperCAmelCase) self.assertIn("flax_and_transformers" , __UpperCAmelCase) self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase) # Likewise, we can't assert on the exact content of a key self.assertIn("UNet2DModel" , objects["torch"]) self.assertIn("FlaxUNet2DConditionModel" , objects["flax"]) self.assertIn("StableDiffusionPipeline" , 
objects["torch_and_transformers"]) self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"]) self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"]) self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"]) def __snake_case ( self : Tuple): a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'") self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n") a : Dict = create_dummy_object("function" , "'torch'") self.assertEqual( __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n") a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n" a : int = create_dummy_object("FakeClass" , "'torch'") self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[str]): a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n" a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
40
1
"""simple docstring""" def lowercase ( A_ , A_ , A_ )-> list: '''simple docstring''' a : List[str] = len(A_ ) a : int = [[0] * n for i in range(A_ )] for i in range(A_ ): a : Dict = y_points[i] for i in range(2 , A_ ): for j in range(A_ , A_ ): a : Union[str, Any] = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
40
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
1
"""simple docstring""" import operator as op def lowercase ( A_ )-> Any: '''simple docstring''' a : Tuple = [] a : int = lambda A_ , A_ : int(x / y ) # noqa: E731 integer division operation a : Tuple = { "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " ) print("-" * (30 + len(A_ )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(A_ ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(A_ ) , sep=" | " ) else: a : str = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(A_ ) , sep=" | " ) a : str = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(A_ ) , sep=" | " ) stack.append( str(opr[x](int(A_ ) , int(A_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(A_ ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": __lowercase = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """) print("""\n\tResult = """, solve(Postfix))
40
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ 
"""FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, 
FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
40
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class _A ( _a ): """simple docstring""" UpperCAmelCase : Union[str, Any] = """microsoft/speecht5_tts""" UpperCAmelCase : Optional[Any] = ( """This is a tool that reads an English text out loud. 
It takes an input named `text` which should contain the """ """text to read (in English) and returns a waveform object containing the sound.""" ) UpperCAmelCase : str = """text_reader""" UpperCAmelCase : str = SpeechTaProcessor UpperCAmelCase : Tuple = SpeechTaForTextToSpeech UpperCAmelCase : Tuple = SpeechTaHifiGan UpperCAmelCase : Optional[Any] = ["""text"""] UpperCAmelCase : List[Any] = ["""audio"""] def __snake_case ( self : Tuple): if self.post_processor is None: a : Tuple = "microsoft/speecht5_hifigan" super().setup() def __snake_case ( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=None): a : Any = self.pre_processor(text=__UpperCAmelCase , return_tensors="pt" , truncation=__UpperCAmelCase) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("Datasets needs to be installed if not passing speaker embeddings.") a : List[str] = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation") a : Optional[int] = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Optional[int]): with torch.no_grad(): return self.model.generate_speech(**__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : str): with torch.no_grad(): return self.post_processor(__UpperCAmelCase).cpu().detach()
40
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. 
It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , 
__UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
40
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool __lowercase = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", 
"""Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": 
"""kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": 
"""slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class _A ( _a ): """simple docstring""" UpperCAmelCase : Dict = """facebook/nllb-200-distilled-600M""" UpperCAmelCase : Dict = ( """This is a tool that translates text from a language to another. 
It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`.""" ) UpperCAmelCase : Any = """translator""" UpperCAmelCase : Optional[int] = AutoTokenizer UpperCAmelCase : str = AutoModelForSeqaSeqLM UpperCAmelCase : List[Any] = LANGUAGE_CODES UpperCAmelCase : Tuple = ["""text""", """text""", """text"""] UpperCAmelCase : List[str] = ["""text"""] def __snake_case ( self : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any): if src_lang not in self.lang_to_code: raise ValueError(f'''{src_lang} is not a supported language.''') if tgt_lang not in self.lang_to_code: raise ValueError(f'''{tgt_lang} is not a supported language.''') a : Dict = self.lang_to_code[src_lang] a : Any = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __UpperCAmelCase , return_tensors="pt" , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : int): return self.model.generate(**__UpperCAmelCase) def __snake_case ( self : int , __UpperCAmelCase : Union[str, Any]): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__UpperCAmelCase)
40
"""simple docstring""" from __future__ import annotations class _A : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int = 0): a : Tuple = key def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Optional[Any] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(__UpperCAmelCase) ^ key) for ch in content] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : List[Any] = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : Any = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) a : Dict = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned a : str = "" for ch in content: ans += chr(ord(__UpperCAmelCase) ^ key) return ans def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: 
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int): assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase) try: with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase)) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
40
1
"""simple docstring""" import itertools import math def lowercase ( A_ )-> bool: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( )-> Tuple: '''simple docstring''' a : Tuple = 2 while True: if is_prime(A_ ): yield num num += 1 def lowercase ( A_ = 10_001 )-> int: '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
"""simple docstring""" import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def lowercase ( A_ )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def lowercase ( A_ )-> Tuple: '''simple docstring''' class _A : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int): a : List[Any] = metric_id class _A : """simple docstring""" UpperCAmelCase : Union[str, Any] = [MetricMock(_a ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def __snake_case ( self : List[str]): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any: '''simple docstring''' if "tmp_path" in args: a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ): func(*A_ )
40
1
"""simple docstring""" class _A : """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : int): a : Tuple = n a : int = [None] * self.n a : Union[str, Any] = 0 # index of the first element a : Union[str, Any] = 0 a : int = 0 def __len__( self : Optional[Any]): return self.size def __snake_case ( self : Optional[int]): return self.size == 0 def __snake_case ( self : str): return False if self.is_empty() else self.array[self.front] def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Dict): if self.size >= self.n: raise Exception("QUEUE IS FULL") a : Union[str, Any] = data a : List[str] = (self.rear + 1) % self.n self.size += 1 return self def __snake_case ( self : List[str]): if self.size == 0: raise Exception("UNDERFLOW") a : Union[str, Any] = self.array[self.front] a : Any = None a : List[Any] = (self.front + 1) % self.n self.size -= 1 return temp
40
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example __lowercase = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example __lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def lowercase ( A_ )-> list[list[int]]: '''simple docstring''' a : str = [] for i in range(len(A_ ) ): a : str = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours a : Union[str, Any] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(A_ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(A_ ) - 1: neighbour_count += cells[i + 1][j] if i < len(A_ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. 
a : Tuple = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(A_ ) return next_generation def lowercase ( A_ , A_ )-> list[Image.Image]: '''simple docstring''' a : List[str] = [] for _ in range(A_ ): # Create output image a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) ) a : Union[str, Any] = img.load() # Save cells to image for x in range(len(A_ ) ): for y in range(len(cells[0] ) ): a : Optional[Any] = 255 - cells[y][x] * 255 a : str = (colour, colour, colour) # Save image images.append(A_ ) a : Tuple = new_generation(A_ ) return images if __name__ == "__main__": __lowercase = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
40
1
"""simple docstring""" __lowercase = { """Pillow""": """Pillow<10.0.0""", """accelerate""": """accelerate>=0.20.3""", """av""": """av==9.2.0""", """beautifulsoup4""": """beautifulsoup4""", """black""": """black~=23.1""", """codecarbon""": """codecarbon==1.2.0""", """cookiecutter""": """cookiecutter==1.7.3""", """dataclasses""": """dataclasses""", """datasets""": """datasets!=2.5.0""", """decord""": """decord==0.6.0""", """deepspeed""": """deepspeed>=0.9.3""", """diffusers""": """diffusers""", """dill""": """dill<0.3.5""", """evaluate""": """evaluate>=0.2.0""", """fairscale""": """fairscale>0.3""", """faiss-cpu""": """faiss-cpu""", """fastapi""": """fastapi""", """filelock""": """filelock""", """flax""": """flax>=0.4.1,<=0.7.0""", """ftfy""": """ftfy""", """fugashi""": """fugashi>=1.0""", """GitPython""": """GitPython<3.1.19""", """hf-doc-builder""": """hf-doc-builder>=0.3.0""", """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""", """importlib_metadata""": """importlib_metadata""", """ipadic""": """ipadic>=1.0.0,<2.0""", """isort""": """isort>=5.5.4""", """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""", """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""", """jieba""": """jieba""", """kenlm""": """kenlm""", """keras-nlp""": """keras-nlp>=0.3.1""", """librosa""": """librosa""", """nltk""": """nltk""", """natten""": """natten>=0.14.6""", """numpy""": """numpy>=1.17""", """onnxconverter-common""": """onnxconverter-common""", """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""", """onnxruntime""": """onnxruntime>=1.4.0""", """opencv-python""": """opencv-python""", """optuna""": """optuna""", """optax""": """optax>=0.0.8,<=0.1.4""", """packaging""": """packaging>=20.0""", """parameterized""": """parameterized""", """phonemizer""": """phonemizer""", """protobuf""": """protobuf""", """psutil""": """psutil""", """pyyaml""": """pyyaml>=5.1""", """pydantic""": """pydantic<2""", """pytest""": """pytest>=7.2.0""", """pytest-timeout""": """pytest-timeout""", """pytest-xdist""": 
"""pytest-xdist""", """python""": """python>=3.8.0""", """ray[tune]""": """ray[tune]""", """regex""": """regex!=2019.12.17""", """requests""": """requests""", """rhoknp""": """rhoknp>=1.1.0,<1.3.1""", """rjieba""": """rjieba""", """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""", """ruff""": """ruff>=0.0.241,<=0.0.259""", """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""", """sacremoses""": """sacremoses""", """safetensors""": """safetensors>=0.3.1""", """sagemaker""": """sagemaker>=2.31.0""", """scikit-learn""": """scikit-learn""", """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""", """sigopt""": """sigopt""", """starlette""": """starlette""", """sudachipy""": """sudachipy>=0.6.6""", """sudachidict_core""": """sudachidict_core>=20220729""", """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""", """tensorflow""": """tensorflow>=2.6,<2.14""", """tensorflow-text""": """tensorflow-text<2.14""", """tf2onnx""": """tf2onnx""", """timeout-decorator""": """timeout-decorator""", """timm""": """timm""", """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""", """torch""": """torch>=1.9,!=1.12.0""", """torchaudio""": """torchaudio""", """torchvision""": """torchvision""", """pyctcdecode""": """pyctcdecode>=0.4.0""", """tqdm""": """tqdm>=4.27""", """unidic""": """unidic>=1.0.2""", """unidic_lite""": """unidic_lite>=1.0.7""", """urllib3""": """urllib3<2.0.0""", """uvicorn""": """uvicorn""", }
40
"""simple docstring""" from itertools import permutations def lowercase ( A_ )-> bool: '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False a : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(A_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowercase ( A_ = 10 )-> int: '''simple docstring''' return sum( int("".join(map(A_ , A_ ) ) ) for num in permutations(range(A_ ) ) if is_substring_divisible(A_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
40
1
"""simple docstring""" def lowercase ( A_ , A_ , A_ , A_ )-> List[Any]: '''simple docstring''' a : List[Any] = [False] * len(A_ ) a : int = [] queue.append(A_ ) a : int = True while queue: a : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(A_ ) a : Dict = True a : Optional[int] = u return visited[t] def lowercase ( A_ , A_ , A_ )-> str: '''simple docstring''' a : int = [-1] * (len(A_ )) a : List[Any] = 0 while bfs(A_ , A_ , A_ , A_ ): a : Tuple = float("Inf" ) a : List[str] = sink while s != source: # Find the minimum value in select path a : List[Any] = min(A_ , graph[parent[s]][s] ) a : str = parent[s] max_flow += path_flow a : Optional[Any] = sink while v != source: a : List[Any] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow a : str = parent[v] return max_flow __lowercase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __lowercase , __lowercase = 0, 5 print(ford_fulkerson(graph, source, sink))
40
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""] UpperCAmelCase : Dict = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCAmelCase : Optional[int] = False @property def __snake_case ( self : Optional[Any]): return 32 @property def __snake_case ( self : Dict): return 32 @property def __snake_case ( self : Dict): return self.time_input_dim @property def __snake_case ( self : Any): return self.time_input_dim * 4 @property def __snake_case ( self : str): return 100 @property def __snake_case ( self : str): torch.manual_seed(0) a : str = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
    # NOTE(review): identifiers in this chunk look machine-renamed (locals are all
    # `a`, methods all `__snake_case`, args `__UpperCAmelCase`); many assignments
    # bind `a` while later statements read the pre-rename names (`model`, `unet`,
    # `pipe`, `hint`, ...), so the code as written cannot run unmodified.
    # Tokens are kept byte-for-byte below; only formatting and comments added.
    # --- tail of a dummy-UNet config property whose `def` lies above this chunk ---
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        # Builds the tiny test UNet from the kwargs dict above.
        a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
        return model  # NOTE(review): `model` is the pre-rename name of `a` -- needs repair

    @property
    def __snake_case ( self : str):
        # Config kwargs for a miniature VQModel ("movq" decoder) used by these tests.
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __snake_case ( self : Union[str, Any]):
        # Deterministic dummy movq (VQModel) instance; seed fixed for reproducibility.
        torch.manual_seed(0)
        a : Dict = VQModel(**self.dummy_movq_kwargs)
        return model  # NOTE(review): pre-rename name again -- should return the VQModel built above

    def __snake_case ( self : Optional[Any]):
        # Assembles the pipeline components: dummy unet + movq + a DDIM scheduler.
        a : Optional[Any] = self.dummy_unet
        a : int = self.dummy_movq
        a : str = DDIMScheduler(
            num_train_timesteps=1000 ,
            beta_schedule="linear" ,
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            clip_sample=__UpperCAmelCase ,
            set_alpha_to_one=__UpperCAmelCase ,
            steps_offset=1 ,
            prediction_type="epsilon" ,
            thresholding=__UpperCAmelCase ,
        )
        # NOTE(review): `unet`/`scheduler`/`movq` are the pre-rename names of the
        # three `a` bindings above.
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
        # Builds deterministic dummy pipeline inputs: seeded image embeds,
        # negative embeds (seed + 1), a 64x64 RGB hint, and an RNG generator.
        a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            __UpperCAmelCase)
        # create hint
        a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        # mps devices fall back to the global torch RNG instead of a device Generator.
        if str(__UpperCAmelCase).startswith("mps"):
            a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
        else:
            a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : str = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __snake_case ( self : Dict):
        # Fast CPU smoke test: the pipeline output must match a recorded 3x3
        # corner slice of the image, via both return_dict=True and the tuple path.
        a : str = "cpu"
        a : Tuple = self.get_dummy_components()
        a : Dict = self.pipeline_class(**__UpperCAmelCase)
        a : Optional[int] = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
        a : Any = output.images
        # Second call with return_dict=False returns a plain tuple; [0] is the images.
        a : Any = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase) ,
            return_dict=__UpperCAmelCase ,
        )[0]
        a : Union[str, Any] = image[0, -3:, -3:, -1]
        a : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : Tuple = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''


@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """simple docstring"""

    def __snake_case ( self : Optional[int]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : List[str]):
        # End-to-end GPU test: prior + controlnet-depth pipeline output is
        # compared against a recorded reference image by mean pixel difference.
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        a : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Depth hint: HWC uint8 image -> [0, 1] float tensor -> NCHW batch of 1.
        a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0
        a : str = hint.permute(2 , 0 , 1).unsqueeze(0)
        a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(__UpperCAmelCase)
        a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        a : int = pipeline.to(__UpperCAmelCase)
        pipeline.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Tuple = "A robot, 4k photo"
        a : Any = torch.Generator(device="cuda").manual_seed(0)
        # The prior produces (image_embeds, negative_image_embeds) as a tuple.
        a , a : int = pipe_prior(
            __UpperCAmelCase ,
            generator=__UpperCAmelCase ,
            num_inference_steps=5 ,
            negative_prompt="" ,
        ).to_tuple()
        a : str = torch.Generator(device="cuda").manual_seed(0)
        a : Union[str, Any] = pipeline(
            image_embeds=__UpperCAmelCase ,
            negative_image_embeds=__UpperCAmelCase ,
            hint=__UpperCAmelCase ,
            generator=__UpperCAmelCase ,
            num_inference_steps=100 ,
            output_type="np" ,
        )
        a : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
40
1
"""Vigenere-style running-key cipher over A-Z; spaces pass through unchanged."""
from string import ascii_uppercase

# Letter -> index (A=0 .. Z=25) and index -> letter lookup tables.
# Fix: both tables were previously bound to the same name, leaving the
# functions below reading an undefined lookup dict.
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extend `key` by repeating its own characters until it matches
    `len(message)` (spaces in the message count toward the length)."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the length-matched key from `generate_key`.

    Each letter becomes (message_idx - key_idx) mod 26; spaces are copied
    through and do not consume a key character.
    """
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher += dictb[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    """Invert `cipher_text`: each letter becomes (cipher_idx + key_idx) mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            # +26 keeps the pre-mod sum non-negative (harmless with Python's %).
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    """Demonstrate a round trip on a fixed message/key pair."""
    # Fix: previously printed an undefined name `s` and called the helpers
    # with the undefined placeholder `A_`.
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
40
"""Convert Audio Spectrogram Transformer (AST) checkpoints from the original
repository (https://github.com/YuanGongND/ast) to the HuggingFace format.

Fixes vs. the previous revision: the machine-renamed locals left `config`,
`dim`, `key_split`, `idalabel`, `model_name_to_url`, etc. unbound, the label
map used the wrong loop variable (`int(A_)` instead of `int(k)`), and
`rename_key` was never applied to non-qkv keys in `convert_state_dict`.
"""
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    """Build an ASTConfig whose strides/labels match the variant in `model_name`."""
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # default strides
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    """Map an original AST parameter name to its HuggingFace equivalent."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    # attn.proj must be handled before the generic attn rule below.
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys in-place; fused qkv tensors are split into q/k/v."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    """Drop the original repo's distillation head, which has no HF counterpart."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original AST checkpoint, convert it, verify the logits on a
    sample input, and optionally save and/or push the HF model + extractor."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load HF model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1_024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
40
1
"""Tree sort: insert values into a binary search tree, then read them back
in-order. Fixes vs. the previous revision: the node class was renamed `_A`
and its insert method `__snake_case`, while the code constructed `Node` and
recursed via `.insert`, so nothing resolved.
"""


class Node:
    """A binary-search-tree node holding a value and two child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert `val` into the subtree rooted here; duplicates are dropped.

        NOTE(review): the original guard is `if self.val:` (truthiness), so a
        tree rooted at a falsy value like 0 would be overwritten -- kept as-is
        to preserve the historical behavior.
        """
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Append the subtree's values to `res` in ascending (in-order) order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Return the elements of `arr` sorted ascending (duplicates removed,
    as a side effect of the BST insert). Empty input is returned unchanged."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
40
"""Lazy-import package init for the RAG model family.

Fix: all four module-level assignments were bound to the same name
(`__lowercase`), so `_import_structure` handed to `_LazyModule` was undefined,
the torch/TF export lists were never registered, and the lazy module was never
installed into `sys.modules`.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

# PyTorch models are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

# TensorFlow models are only exported when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
40
1