code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase_ : a__ = None a__ = False a__ = False a__ = False a__ = None a__ = None a__ = False a__ = False a__ = False a__ = True a__ = None a__ = 1 a__ = None a__ = False a__ = None a__ = None def A ( self ): """simple docstring""" return self.__class__(**{k: copy.deepcopy(__lowerCAmelCase ) for k, v in self.__dict__.items()} )
0
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] _UpperCAmelCase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
652
0
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version __snake_case = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize __snake_case = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' __snake_case = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' __snake_case = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. 
Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase (datasets.Metric ): def snake_case_ ( self: List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { 'predictions': datasets.Value('string',id='sequence' ), 'references': datasets.Value('string',id='sequence' ), } ),codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ],) def snake_case_ ( self: int,A_: str ): '''simple docstring''' import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def snake_case_ ( self: str,A_: Any,A_: Optional[Any],A_: Any=0.9,A_: int=3,A_: Optional[Any]=0.5 ): '''simple docstring''' if NLTK_VERSION >= version.Version('3.6.5' ): __UpperCamelCase = [ 
meteor_score.single_meteor_score( word_tokenize(A_ ),word_tokenize(A_ ),alpha=A_,beta=A_,gamma=A_ ) for ref, pred in zip(A_,A_ ) ] else: __UpperCamelCase = [ meteor_score.single_meteor_score(A_,A_,alpha=A_,beta=A_,gamma=A_ ) for ref, pred in zip(A_,A_ ) ] return {"meteor": np.mean(A_ )}
1
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
652
0
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel UpperCAmelCase_ = False UpperCAmelCase_ = True UpperCAmelCase_ = False if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( """--repo_path""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = { """image_size""": """sample_size""", """num_res_blocks""": """layers_per_block""", """block_channels""": """block_out_channels""", """down_blocks""": """down_block_types""", """up_blocks""": """up_block_types""", """downscale_freq_shift""": """freq_shift""", """resnet_num_groups""": """norm_num_groups""", """resnet_act_fn""": """act_fn""", """resnet_eps""": """norm_eps""", """num_head_channels""": """attention_head_dim""", } UpperCAmelCase_ = { """time_steps""": """time_proj""", """mid""": """mid_block""", """downsample_blocks""": """down_blocks""", """upsample_blocks""": """up_blocks""", } UpperCAmelCase_ = """""" if has_file(args.repo_path, """config.json""") else """unet""" with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader: UpperCAmelCase_ = reader.read() UpperCAmelCase_ = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, """config.json"""): UpperCAmelCase_ = UNetaDModel(**config) else: UpperCAmelCase_ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel UpperCAmelCase_ = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) UpperCAmelCase_ = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): 
if key in config: UpperCAmelCase_ = config[key] del config[key] UpperCAmelCase_ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]] UpperCAmelCase_ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]] if do_only_weights: UpperCAmelCase_ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin""")) UpperCAmelCase_ = {} for param_key, param_value in state_dict.items(): if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""): continue UpperCAmelCase_ = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(""".""")[0] == key: UpperCAmelCase_ = param_value UpperCAmelCase_ = True if not has_changed: UpperCAmelCase_ = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
2
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ={"""vocab_file""": """vocab.txt"""} a ={ """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } a ={ """openbmb/cpm-ant-10b""": 1024, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: __lowerCamelCase : int = collections.OrderedDict() with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader: __lowerCamelCase : Optional[int] = reader.readlines() for index, token in enumerate(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = token.rstrip('\n' ) __lowerCamelCase : Union[str, Any] = index return vocab class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="<unk>" ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_0): __lowerCamelCase : str = vocab __lowerCamelCase : Dict = unk_token __lowerCamelCase : int = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : int = list(SCREAMING_SNAKE_CASE__) if len(SCREAMING_SNAKE_CASE__) > self.max_input_chars_per_word: return [self.unk_token] __lowerCamelCase : Tuple = 0 __lowerCamelCase : str = [] while start < len(SCREAMING_SNAKE_CASE__): __lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = None while start < end: __lowerCamelCase : Any = ''.join(chars[start:end]) if substr in self.vocab: __lowerCamelCase : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = end return sub_tokens class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = 
VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = False def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,): requires_backends(self ,['jieba']) super().__init__( bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Optional[Any] = bod_token __lowerCamelCase : Dict = eod_token __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.encoder[space_token] __lowerCamelCase : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1])) __lowerCamelCase : int = {v: k for k, v in self.encoder.items()} __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token) @property def lowerCAmelCase ( self : List[Any]): return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : Tuple): return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : Union[str, Any]): return self.encoder["\n"] @property def lowerCAmelCase ( self : 
str): return len(self.encoder) def lowerCAmelCase ( self : str): return dict(self.encoder ,**self.added_tokens_encoder) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Any = [] for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)) return output_tokens def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0] __lowerCamelCase : str = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]): return token in self.encoder def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]): return "".join(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token)) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if os.path.isdir(SCREAMING_SNAKE_CASE__): __lowerCamelCase : Any = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory __lowerCamelCase : Any = 0 if " " in self.encoder: __lowerCamelCase : Any = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: __lowerCamelCase : str = self.encoder['\n'] del self.encoder["\n"] __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() 
,key=lambda SCREAMING_SNAKE_CASE__: x[1])) with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!') __lowerCamelCase : Any = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
652
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : Tuple = { 'configuration_blip_2': [ 'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Blip2Config', 'Blip2QFormerConfig', 'Blip2VisionConfig', ], 'processing_blip_2': ['Blip2Processor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = [ 'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Blip2Model', 'Blip2QFormerModel', 'Blip2PreTrainedModel', 'Blip2ForConditionalGeneration', 'Blip2VisionModel', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a ={"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
652
0
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : list[float] ): if len(_UpperCAmelCase ) < 2: raise ValueError('Monogons and Digons are not polygons in the Euclidean space' ) if any(i <= 0 for i in nums ): raise ValueError('All values must be greater than 0' ) lowerCAmelCase = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
4
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : int = (UnCLIPScheduler,) def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Any = { 'num_train_timesteps': 1_0_0_0, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**SCREAMING_SNAKE_CASE__) return config def lowerCAmelCase ( self : Optional[Any]): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[Any]): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Union[str, Any]): for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple): for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any]): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase : Optional[int] = self.scheduler_classes[0] __lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log') __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__) assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5 def lowerCAmelCase ( self : Any): __lowerCamelCase : Dict = self.scheduler_classes[0] __lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range') __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = 0.5 assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5 assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5 assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5 def lowerCAmelCase ( self : List[str]): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : str = self.get_scheduler_config() __lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = scheduler.timesteps __lowerCamelCase : Union[str, Any] = self.dummy_model() __lowerCamelCase : Optional[Any] = self.dummy_sample_deter __lowerCamelCase : List[str] = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) # 2. 
predict previous mean of sample x_t-1 __lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Optional[Any] = pred_prev_sample __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 252.2682495) < 1E-2 assert abs(result_mean.item() - 0.3284743) < 1E-3 def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : List[Any] = self.get_scheduler_config() __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) scheduler.set_timesteps(2_5) __lowerCamelCase : int = scheduler.timesteps __lowerCamelCase : Tuple = self.dummy_model() __lowerCamelCase : Any = self.dummy_sample_deter __lowerCamelCase : Any = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if i + 1 == timesteps.shape[0]: __lowerCamelCase : Optional[Any] = None else: __lowerCamelCase : Union[str, Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowerCamelCase : int = scheduler.step( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Union[str, Any] = pred_prev_sample __lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 258.2044983) < 1E-2 assert abs(result_mean.item() - 0.3362038) < 1E-3 def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Union[str, Any]): pass
652
0
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _lowercase = logging.get_logger(__name__) def A (__lowerCamelCase :np.ndarray , __lowerCamelCase :Union[int, Iterable[int]] , __lowerCamelCase :bool , __lowerCamelCase :int ): def constraint_to_multiple_of(__lowerCamelCase :List[str] , __lowerCamelCase :str , __lowerCamelCase :Optional[int]=0 , __lowerCamelCase :Any=None ): _lowerCAmelCase = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowerCAmelCase = math.floor(val / multiple ) * multiple if x < min_val: _lowerCAmelCase = math.ceil(val / multiple ) * multiple return x _lowerCAmelCase = (output_size, output_size) if isinstance(__lowerCamelCase , __lowerCamelCase ) else output_size _lowerCAmelCase , _lowerCAmelCase = get_image_size(__lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase = output_size # determine new height and width _lowerCAmelCase = output_height / input_height _lowerCAmelCase = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowerCAmelCase = scale_width else: # fit height _lowerCAmelCase = scale_height _lowerCAmelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__lowerCamelCase ) _lowerCAmelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__lowerCamelCase ) return (new_height, 
new_width) class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' _lowercase : List[str] = ['''pixel_values'''] def __init__( self , _lowercase = True , _lowercase = None , _lowercase = PILImageResampling.BILINEAR , _lowercase = False , _lowercase = 1 , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = None , _lowercase = None , **_lowercase , ): """simple docstring""" super().__init__(**_lowercase ) _lowerCAmelCase = size if size is not None else {"""height""": 384, """width""": 384} _lowerCAmelCase = get_size_dict(_lowercase ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = keep_aspect_ratio _lowerCAmelCase = ensure_multiple_of _lowerCAmelCase = resample _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self , _lowercase , _lowercase , _lowercase = False , _lowercase = 1 , _lowercase = PILImageResampling.BICUBIC , _lowercase = None , **_lowercase , ): """simple docstring""" _lowerCAmelCase = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}' ) _lowerCAmelCase = get_resize_output_image_size( _lowercase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=_lowercase , multiple=_lowercase , ) return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase ) def _lowercase ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , ): """simple docstring""" return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , ): """simple docstring""" return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def _lowercase ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , ): """simple docstring""" _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowercase ) _lowerCAmelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowerCAmelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise 
ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowercase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] _lowerCAmelCase = {"""pixel_values""": images} return BatchFeature(data=_lowercase , tensor_type=_lowercase ) def _lowercase ( self , _lowercase , _lowercase = None ): """simple docstring""" _lowerCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_lowercase ) != len(_lowercase ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_lowercase ): _lowerCAmelCase = target_sizes.numpy() _lowerCAmelCase = [] for idx in range(len(_lowercase ) ): _lowerCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_lowercase ) _lowerCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_lowercase ) else: _lowerCAmelCase = logits.argmax(dim=1 ) 
_lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
5
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    """Configuration for a Swin2SR super-resolution model.

    Stores the hyper-parameters used to instantiate the model; defaults match the
    `caidas/swin2sr-classicalsr-x2-64` architecture. All arguments are optional and
    are stored verbatim as attributes.

    NOTE(review): the obfuscated original declared every __init__ parameter with the
    same placeholder name (a SyntaxError) and assigned both class attributes to the
    same placeholder; `model_type` and `attribute_map` are the names the
    `PretrainedConfig` machinery requires.
    """

    model_type = "swin2sr"

    # Maps standard config attribute names onto this model's native names.
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # Derived: one transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
652
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer.

    NOTE(review): the obfuscated original gave every method the same name and every
    parameter the same name (a SyntaxError); names below are restored to the
    overrides the `PreTrainedTokenizerFast` contract expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain strings into AddedToken so stripping behavior is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model file the slow vocab cannot be re-saved.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: all 0 for the first segment, all 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
6
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line, padded/truncated to `max_length`."""
    # BART needs add_prefix_space when the line does not already start with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by `pad_token_id`."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    """Line-aligned <type_path>.source / <type_path>.target dataset for seq2seq training."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG uses separate encoders.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line of `data_file`."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack a list of examples and trim all-pad columns."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    """Flatten one level of nesting."""
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information (sha, branch, host) to `folder_path/git_log.json`."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    """Return a dict describing the current git checkout and hostname."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """Pickle `obj` to `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style)."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction: str, ground_truth: str) -> float:
    """Token-level F1 between normalized prediction and ground truth."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction: str, ground_truth: str) -> bool:
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Average exact-match over aligned output/reference lines."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix: str) -> bool:
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    """Move hparams listed in `extra_params` onto `config`, mapping T5 aliases."""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
652
0
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of '0'/'1' bits.

    Exits the process with a message if the file cannot be opened.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extend the LZW lexicon in place after `curr_string` was emitted.

    The matched key is removed and replaced by its two one-bit extensions; when
    `index` reaches a power of two every code needs one more bit, so all existing
    codes are prefixed with '0'.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress a bit string using the LZW-style scheme above."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # Flush a trailing partial match by zero-padding until it hits a lexicon key.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file length (self-delimiting unary+binary header)."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write a bit string to `file_path`, padding the last byte with a 1-then-0s marker."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Compress `source_path` into `destination_path`."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
7
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Map TensorFlow checkpoint variable names onto PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    # Each TF block i maps to a (depthwise, pointwise) pair of PyTorch layers.
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into the PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the weight and its optimizer slots so leftovers can be reported.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad `features` to reproduce TensorFlow "SAME" padding for `conv_layer`."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    """Convolution + optional batch norm + optional activation building block."""

    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[Union[bool, str]] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF padding the explicit padding is done in forward(); otherwise use "same"-style padding here.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    """Base class handling weight initialization and pretrained loading."""

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            # Channel count doubles at every stride-2 block (and the first block).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 followed by pointwise 1x1 convolution.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once, then pick the matching loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
652
0
'''simple docstring''' import re def _lowerCAmelCase ( __snake_case : str ) -> bool: __A : Dict = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(__snake_case , __snake_case ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator('''+918827897895'''))
8
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = cva.getAffineTransform(lowerCamelCase__ , lowerCamelCase__ ) return cva.warpAffine(lowerCamelCase__ , lowerCamelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image a =cva.imread( str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""") ) # turn image in gray scale value a =cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape a , a =gray_img.shape # set different points to rotate image a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa) a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa) a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa) a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list a =[ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations a =plt.figure(1) a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""") plt.title(titles[i]) plt.axis("""off""") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
652
0
import cva import numpy as np class __lowerCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : float , _snake_case : int ): """simple docstring""" if k in (0.04, 0.06): A__ = k A__ = window_size else: raise ValueError('invalid k value' ) def __str__( self : Any ): """simple docstring""" return str(self.k ) def _a ( self : Union[str, Any] , _snake_case : str ): """simple docstring""" A__ = cva.imread(_snake_case , 0 ) A__ , A__ = img.shape A__ = [] A__ = img.copy() A__ = cva.cvtColor(_snake_case , cva.COLOR_GRAY2RGB ) A__ , A__ = np.gradient(_snake_case ) A__ = dx**2 A__ = dy**2 A__ = dx * dy A__ = 0.04 A__ = self.window_size // 2 for y in range(_snake_case , h - offset ): for x in range(_snake_case , w - offset ): A__ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A__ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A__ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() A__ = (wxx * wyy) - (wxy**2) A__ = wxx + wyy A__ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_55 ) return color_img, corner_list if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = HarrisCorner(0.04, 3) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = edge_detect.detect('''path_to_image''') cva.imwrite('''detect.png''', color_img)
9
import math def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : List[str] = F"Input value of [number={number}] must be an integer" raise TypeError(lowerCamelCase__ ) if number < 1: __lowerCamelCase : int = F"Input value of [number={number}] must be > 0" raise ValueError(lowerCamelCase__ ) elif number == 1: return 3 elif number == 2: return 5 else: __lowerCamelCase : Any = int(math.log(number // 3 , 2 ) ) + 2 __lowerCamelCase : List[Any] = [3, 5] __lowerCamelCase : Union[str, Any] = 2 __lowerCamelCase : List[str] = 3 for block in range(1 , lowerCamelCase__ ): for _ in range(lowerCamelCase__ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): a =0 try: a =proth(number) except ValueError: print(F"""ValueError: there is no {number}th Proth number""") continue print(F"""The {number}th Proth number: {value}""")
652
0
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger() def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = True ): print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": _UpperCamelCase = timm.create_model('''levit_128s''' , pretrained=__snake_case ) else: _UpperCamelCase = timm.create_model('''levit_128''' , pretrained=__snake_case ) if hidden_sizes == 192: _UpperCamelCase = timm.create_model('''levit_192''' , pretrained=__snake_case ) if hidden_sizes == 256: _UpperCamelCase = timm.create_model('''levit_256''' , pretrained=__snake_case ) if hidden_sizes == 384: _UpperCamelCase = timm.create_model('''levit_384''' , pretrained=__snake_case ) from_model.eval() _UpperCamelCase = LevitForImageClassificationWithTeacher(__snake_case ).eval() _UpperCamelCase = OrderedDict() _UpperCamelCase = from_model.state_dict() _UpperCamelCase = list(from_model.state_dict().keys() ) _UpperCamelCase = list(our_model.state_dict().keys() ) print(len(__snake_case ) , len(__snake_case ) ) for i in range(len(__snake_case ) ): _UpperCamelCase = weights[og_keys[i]] our_model.load_state_dict(__snake_case ) _UpperCamelCase = torch.randn((2, 3, 224, 224) ) _UpperCamelCase = from_model(__snake_case ) _UpperCamelCase = our_model(__snake_case ).logits assert torch.allclose(__snake_case , __snake_case ), "The model logits don't match the original one." 
_UpperCamelCase = name print(__snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) _UpperCamelCase = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def _snake_case ( __snake_case , __snake_case = None , __snake_case = True ): _UpperCamelCase = '''imagenet-1k-id2label.json''' _UpperCamelCase = 1000 _UpperCamelCase = (1, num_labels) _UpperCamelCase = '''huggingface/label-files''' _UpperCamelCase = num_labels _UpperCamelCase = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) ) _UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()} _UpperCamelCase = idalabel _UpperCamelCase = {v: k for k, v in idalabel.items()} _UpperCamelCase = partial(__snake_case , num_labels=__snake_case , idalabel=__snake_case , labelaid=__snake_case ) _UpperCamelCase = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } _UpperCamelCase = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] 
, __snake_case , names_to_config[model_name] , __snake_case , __snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , __snake_case , __snake_case , __snake_case , __snake_case ) return config, expected_shape if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,", ) parser.add_argument( "--pytorch_dump_folder_path", default="levit-dump-folder/", type=Path, required=False, help="Path to the output PyTorch model directory.", ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
10
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A_ ( unittest.TestCase ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __lowerCamelCase : str = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = min_resolution __lowerCamelCase : Tuple = max_resolution __lowerCamelCase : Dict = do_resize __lowerCamelCase : List[Any] = size __lowerCamelCase : Tuple = do_normalize __lowerCamelCase : Any = image_mean __lowerCamelCase : List[str] = image_std __lowerCamelCase : List[Any] = do_rescale __lowerCamelCase : str = rescale_factor __lowerCamelCase : Tuple = do_pad def lowerCAmelCase ( self : Dict): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, 
"do_pad": self.do_pad, } def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False): if not batched: __lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image): __lowerCamelCase , __lowerCamelCase : Any = image.size else: __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w) __lowerCamelCase : Tuple = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Union[str, Any] = self.size['shortest_edge'] __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h) else: __lowerCamelCase : List[Any] = self.size['shortest_edge'] __lowerCamelCase : Optional[int] = self.size['shortest_edge'] else: __lowerCamelCase : List[str] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0] __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = DetaImageProcessingTester(self) @property def lowerCAmelCase ( self : Any): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Dict): __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ 
,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester 
,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : int): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ 
,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def lowerCAmelCase ( self : Optional[Any]): # prepare image and target __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f: __lowerCamelCase : List[str] = json.loads(f.read()) __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __lowerCamelCase : Optional[int] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] 
,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify orig_size __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__)) @slow def lowerCAmelCase ( self : str): # prepare image, target and masks_path __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f: __lowerCamelCase : Tuple = json.loads(f.read()) __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic') __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : Tuple = torch.Size([6, 4]) 
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : int = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify masks __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__) # verify orig_size __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
652
0
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __A ( A ): '''simple docstring''' __lowerCamelCase : Dict = ['image_processor', 'tokenizer'] __lowerCamelCase : Union[str, Any] = 'OwlViTImageProcessor' __lowerCamelCase : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self , A=None , A=None , **A ) -> str: """simple docstring""" _a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , A , ) _a = kwargs.pop('''feature_extractor''' ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(A , A ) def __call__(self , A=None , A=None , A=None , A="max_length" , A="np" , **A ) -> int: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. 
All three cannot be none.''' ) if text is not None: if isinstance(A , A ) or (isinstance(A , A ) and not isinstance(text[0] , A )): _a = [self.tokenizer(A , padding=A , return_tensors=A , **A )] elif isinstance(A , A ) and isinstance(text[0] , A ): _a = [] # Maximum number of queries across batch _a = max([len(A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(A ) != max_num_queries: _a = t + [''' '''] * (max_num_queries - len(A )) _a = self.tokenizer(A , padding=A , return_tensors=A , **A ) encodings.append(A ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": _a = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( A , return_tensors=A , **A ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(A , return_tensors=A , **A ) if text is not None and images is not None: _a 
= image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**A ) , tensor_type=A ) def a__ (self , *A , **A ) -> Any: """simple docstring""" return self.image_processor.post_process(*A , **A ) def a__ (self , *A , **A ) -> Union[str, Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*A , **A ) def a__ (self , *A , **A ) -> Optional[Any]: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*A , **A ) def a__ (self , *A , **A ) -> Tuple: """simple docstring""" return self.tokenizer.batch_decode(*A , **A ) def a__ (self , *A , **A ) -> str: """simple docstring""" return self.tokenizer.decode(*A , **A ) @property def a__ (self ) -> Dict: """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A , ) return self.image_processor_class @property def a__ (self ) -> List[Any]: """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A , ) return self.image_processor
11
# NOTE(review): this file has been machine-obfuscated — classes are named `A_`,
# every method is `lowerCAmelCase`, locals are `__lowerCamelCase`, and some
# parameters/base classes were collapsed to `SCREAMING_SNAKE_CASE(__)`.  As a
# result many names below are undefined at runtime (e.g. `model`, `unet`,
# `inputs`), the identically-named methods/attributes shadow one another, and
# the duplicated parameter names are outright syntax errors.  Comments describe
# the apparent intent (Kandinsky 2.2 ControlNet pipeline tests — presumably
# from diffusers; confirm against upstream before restoring names).
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


# Make torch deterministic so the pixel-slice assertions below are reproducible.
enable_full_determinism()


# Fast CPU pipeline tests built from tiny dummy components.
# NOTE(review): the base class was obfuscated to `SCREAMING_SNAKE_CASE` —
# presumably `PipelineTesterMixin` (imported above); TODO confirm upstream.
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # NOTE(review): these class attributes all share one obfuscated name, so in
    # this form only the last assignment survives; upstream they are distinct
    # (pipeline_class, params, batch_params, required_optional_params, ...).
    _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
    _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase : Tuple = False

    # Tiny model dimensions used to size the dummy components below.
    @property
    def lowerCAmelCase ( self : Tuple):
        return 3_2

    @property
    def lowerCAmelCase ( self : List[Any]):
        return 3_2

    @property
    def lowerCAmelCase ( self : str):
        return self.time_input_dim

    @property
    def lowerCAmelCase ( self : List[str]):
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase ( self : List[str]):
        return 1_0_0

    # Dummy conditional UNet; seeded so its random weights are reproducible.
    @property
    def lowerCAmelCase ( self : Dict):
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        # NOTE(review): obfuscation broke this — the kwargs dict and the
        # constructed model were both renamed `__lowerCamelCase`, so `model`
        # (and the `SCREAMING_SNAKE_CASE__` argument) are undefined here.
        __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
        return model

    # Keyword arguments for the tiny VQ decoder ("movq") built below.
    @property
    def lowerCAmelCase ( self : Union[str, Any]):
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    # Dummy VQ model; `model` is an obfuscation casualty (see NOTE above).
    @property
    def lowerCAmelCase ( self : Optional[Any]):
        torch.manual_seed(0)
        __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
        return model

    # Assembles the component dict handed to the pipeline constructor.
    def lowerCAmelCase ( self : Optional[Any]):
        __lowerCamelCase : Tuple = self.dummy_unet
        __lowerCamelCase : List[Any] = self.dummy_movq
        __lowerCamelCase : str = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Dict = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    # Deterministic pipeline call kwargs for a given device/seed.
    # NOTE(review): the duplicated obfuscated parameter names make this `def` a
    # SyntaxError as written; upstream they are `device` and `seed`.
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
        __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            SCREAMING_SNAKE_CASE__)
        # create hint
        __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        # mps does not support device-bound Generators, hence the split.
        if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
            __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    # Smoke test: run the dummy pipeline on CPU and pin a 3x3 output slice,
    # both via the output object and via the return_dict=False tuple path.
    def lowerCAmelCase ( self : Optional[Any]):
        __lowerCamelCase : Dict = 'cpu'
        __lowerCamelCase : Tuple = self.get_dummy_components()
        __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = output.images
        __lowerCamelCase : Tuple = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __lowerCamelCase : List[str] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


# End-to-end integration test against the released checkpoints; GPU-only.
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    def lowerCAmelCase ( self : int):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Compares a full prior+controlnet generation against a stored fp16 image.
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        __lowerCamelCase : Union[str, Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # Convert the hint image to a (1, C, H, W) float tensor in [0, 1].
        __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0
        __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0)
        __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
        pipe_prior.to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa)
        __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__)
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = 'A robot, 4k photo'
        __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior(
            SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
        __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase : Any = pipeline(
            image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,)
        __lowerCamelCase : List[Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
652
0
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with F(0) = 0 and F(1) = 1.

    BUG FIXES vs. the obfuscated original:
    * all three functions in this module were renamed to ``UpperCamelCase``
      (each shadowing the previous) while their bodies still called
      ``fibonacci`` / ``fibonacci_digits_index`` / ``solution`` — a guaranteed
      ``NameError``.  The internally-referenced names are restored here.
    * the original returned 0 for ``n == 1``, but F(1) = 1 (its own sequence
      stored ``sequence[1] == 1``).
    Non-integer and negative input still yields 0, preserving the original's
    lenient no-raise contract.
    """
    if not isinstance(n, int) or n < 0:
        # Original behavior: silently map bad input to 0 rather than raising.
        return 0
    if n < 2:
        return n  # F(0) = 0, F(1) = 1
    previous, current = 0, 1
    for _ in range(n - 1):
        previous, current = current, previous + current
    return current


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least ``n`` digits.

    Scans from index 3 upward exactly like the original, but extends the
    sequence incrementally instead of rebuilding it from scratch on every
    probe, turning the accidental O(index**2) into O(index).
    """
    index = 2
    digits = 0
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    while digits < n:
        index += 1
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        digits = len(str(fib_curr))
    return index


def solution(n: int = 1_000) -> int:
    """Project Euler 25: index of the first Fibonacci number with ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    # input() already returns str; the original's redundant str() is dropped.
    print(solution(int(input().strip())))
12
# NOTE(review): machine-obfuscated file — classes are `A_`, methods are all
# `lowerCAmelCase` (shadowing one another), locals are `__lowerCamelCase`, and
# parameters were collapsed to `SCREAMING_SNAKE_CASE__` (the duplicated
# parameter names below are SyntaxErrors as written).  Apparent intent: the
# TF XGLM model tests — presumably transformers' test_modeling_tf_xglm.py;
# confirm upstream before restoring names.
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


# Model tester: holds tiny hyper-parameters and builds config/inputs for the
# common-test machinery.
@require_tf
class A_ :
    # NOTE(review): upstream these are distinct attributes (config_class,
    # special-args dict, hidden activation); the shared obfuscated name means
    # only the last assignment survives here.
    _UpperCAmelCase : int = XGLMConfig
    _UpperCAmelCase : List[Any] = {}
    _UpperCAmelCase : Tuple = '''gelu'''

    # NOTE(review): the duplicated `SCREAMING_SNAKE_CASE__` parameters make
    # this signature a SyntaxError; upstream they are parent, batch_size=14,
    # seq_length=7, ... initializer_range=0.02.
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,):
        # NOTE(review): all locals were renamed `__lowerCamelCase`, so the
        # attribute names the rest of the class reads (self.batch_size, ...)
        # are never actually set in this form.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : List[str] = batch_size
        __lowerCamelCase : str = seq_length
        __lowerCamelCase : Optional[Any] = is_training
        __lowerCamelCase : Any = use_input_mask
        __lowerCamelCase : str = use_labels
        __lowerCamelCase : Any = vocab_size
        __lowerCamelCase : Dict = d_model
        __lowerCamelCase : int = num_hidden_layers
        __lowerCamelCase : List[Any] = num_attention_heads
        __lowerCamelCase : List[str] = ffn_dim
        __lowerCamelCase : Optional[Any] = activation_function
        __lowerCamelCase : Tuple = activation_dropout
        __lowerCamelCase : Union[str, Any] = attention_dropout
        __lowerCamelCase : List[str] = max_position_embeddings
        __lowerCamelCase : List[Any] = initializer_range
        __lowerCamelCase : Any = None
        # Special token ids (presumably bos=0, eos=2, pad=1 — TODO confirm).
        __lowerCamelCase : List[str] = 0
        __lowerCamelCase : List[str] = 2
        __lowerCamelCase : Dict = 1

    def lowerCAmelCase ( self : Any):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    # Builds (config, input_ids, input_mask, head_mask) for the common tests.
    def lowerCAmelCase ( self : str):
        __lowerCamelCase : Any = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3)
        __lowerCamelCase : Dict = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCamelCase : int = self.get_config()
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    # Builds a tiny XGLMConfig from the hyper-parameters stored above.
    def lowerCAmelCase ( self : List[Any]):
        return XGLMConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,)

    # Repackages prepare_config_and_inputs() into the dict form the common
    # test suite consumes.
    # NOTE(review): the annotated tuple-unpacking (`(...) : Any = ...`) below
    # is a SyntaxError introduced by the obfuscation.
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Any = config_and_inputs
        __lowerCamelCase : str = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict


# Common-framework model tests wired to the tester above.
# NOTE(review): the two `SCREAMING_SNAKE_CASE` bases are presumably
# TFModelTesterMixin and PipelineTesterMixin (imported above).
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    _UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    _UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
    _UpperCAmelCase : str = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    _UpperCAmelCase : Tuple = False
    _UpperCAmelCase : Optional[int] = False
    _UpperCAmelCase : Union[str, Any] = False

    def lowerCAmelCase ( self : Tuple):
        __lowerCamelCase : Tuple = TFXGLMModelTester(self)
        __lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7)

    def lowerCAmelCase ( self : List[Any]):
        self.config_tester.run_common_tests()

    # Loads the first published checkpoint to verify from_pretrained works.
    @slow
    def lowerCAmelCase ( self : str):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def lowerCAmelCase ( self : Union[str, Any]):
        super().test_resize_token_embeddings()


# Generation integration tests against the released 564M checkpoint.
@require_tf
class A_ ( unittest.TestCase ):
    # Greedy generation must reproduce a pinned token sequence.
    @slow
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True):
        __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa)
        # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__)

    # Seeded sampling on CPU must reproduce a pinned string.
    @slow
    def lowerCAmelCase ( self : List[str]):
        __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf')
        __lowerCamelCase : List[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0])
        __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # Left-padded batched generation must match per-sentence generation.
    @slow
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = 'left'
        # use different length sentences to test batching
        __lowerCamelCase : List[str] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = inputs['input_ids']
        __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2)
        __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
652
0
'''Tests for StableDiffusionUpscalePipeline (fast dummy-component tests and
slow GPU integration tests).

NOTE(review): this file has been machine-obfuscated — classes are
`UpperCAmelCase_`, every method is `lowercase_` (shadowing one another),
locals are `__lowerCamelCase`, and most call arguments were collapsed to the
undefined name `SCREAMING_SNAKE_CASE_`.  The code cannot run in this form;
comments describe the apparent intent.  Presumably diffusers'
test_stable_diffusion_upscale.py — confirm upstream before restoring names.
'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


# Make torch deterministic so the pixel-slice assertions are reproducible.
enable_full_determinism()


class UpperCAmelCase_ (unittest.TestCase ):
    """Fast pipeline tests built from tiny dummy components (mostly CPU)."""

    def lowercase_ ( self ) -> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Dummy (1, 3, 32, 32) input image tensor, seeded for determinism.
    @property
    def lowercase_ ( self ) -> Any:
        __lowerCamelCase : int = 1
        __lowerCamelCase : List[str] = 3
        __lowerCamelCase : Optional[int] = (32, 32)
        # NOTE(review): `batch_size`/`num_channels`/`sizes` and the returned
        # `image` are undefined here — obfuscation casualties.
        __lowerCamelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
        return image

    # Tiny conditional UNet for the upscale pipeline.
    @property
    def lowercase_ ( self ) -> str:
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=SCREAMING_SNAKE_CASE_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
        return model

    # Tiny VAE.
    @property
    def lowercase_ ( self ) -> str:
        torch.manual_seed(0 )
        __lowerCamelCase : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model

    # Tiny CLIP text encoder.
    @property
    def lowercase_ ( self ) -> List[Any]:
        torch.manual_seed(0 )
        __lowerCamelCase : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
        return CLIPTextModel(SCREAMING_SNAKE_CASE_ )

    # Smoke test: 4x upscale on CPU, pinning a 3x3 output slice via both the
    # output object and the return_dict=False tuple path.
    def lowercase_ ( self ) -> Dict:
        __lowerCamelCase : Any = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[Any] = self.dummy_cond_unet_upscale
        __lowerCamelCase : Dict = DDPMScheduler()
        __lowerCamelCase : List[str] = DDIMScheduler(prediction_type='v_prediction' )
        __lowerCamelCase : str = self.dummy_vae
        __lowerCamelCase : List[Any] = self.dummy_text_encoder
        __lowerCamelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __lowerCamelCase : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCamelCase : Dict = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        __lowerCamelCase : Any = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , max_noise_level=3_50 , )
        __lowerCamelCase : Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[str] = 'A painting of a squirrel eating a burger'
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        __lowerCamelCase : List[Any] = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        __lowerCamelCase : Optional[Any] = output.images
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        __lowerCamelCase : int = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
        __lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        # Output side is 4x the low-res input side.
        __lowerCamelCase : Dict = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        __lowerCamelCase : Optional[int] = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    # Batched prompts and num_images_per_prompt both yield 2 images.
    def lowercase_ ( self ) -> Dict:
        __lowerCamelCase : List[str] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.dummy_cond_unet_upscale
        __lowerCamelCase : List[Any] = DDPMScheduler()
        __lowerCamelCase : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' )
        __lowerCamelCase : Optional[Any] = self.dummy_vae
        __lowerCamelCase : str = self.dummy_text_encoder
        __lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __lowerCamelCase : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCamelCase : Optional[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        __lowerCamelCase : str = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , max_noise_level=3_50 , )
        __lowerCamelCase : str = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[Any] = 'A painting of a squirrel eating a burger'
        __lowerCamelCase : List[Any] = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        __lowerCamelCase : Any = output.images
        assert image.shape[0] == 2
        __lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        __lowerCamelCase : List[Any] = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        __lowerCamelCase : Union[str, Any] = output.images
        assert image.shape[0] == 2

    # fp16 variant; GPU only (unet/text encoder halved, vae kept fp32).
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def lowercase_ ( self ) -> str:
        __lowerCamelCase : int = self.dummy_cond_unet_upscale
        __lowerCamelCase : Union[str, Any] = DDPMScheduler()
        __lowerCamelCase : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' )
        __lowerCamelCase : Any = self.dummy_vae
        __lowerCamelCase : str = self.dummy_text_encoder
        __lowerCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __lowerCamelCase : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCamelCase : Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('RGB' ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        __lowerCamelCase : str = unet.half()
        __lowerCamelCase : int = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        __lowerCamelCase : int = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , max_noise_level=3_50 , )
        __lowerCamelCase : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : str = 'A painting of a squirrel eating a burger'
        __lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        __lowerCamelCase : Dict = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='np' , ).images
        __lowerCamelCase : int = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class UpperCAmelCase_ (unittest.TestCase ):
    """Integration tests against the released x4-upscaler checkpoint."""

    def lowercase_ ( self ) -> List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # fp32 end-to-end run compared against a stored reference image.
    def lowercase_ ( self ) -> Dict:
        __lowerCamelCase : str = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        __lowerCamelCase : Any = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy' )
        __lowerCamelCase : Optional[Any] = 'stabilityai/stable-diffusion-x4-upscaler'
        __lowerCamelCase : List[str] = StableDiffusionUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        __lowerCamelCase : Tuple = 'a cat sitting on a park bench'
        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : Tuple = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , )
        __lowerCamelCase : Union[str, Any] = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    # fp16 end-to-end run; looser tolerance against the fp16 reference.
    def lowercase_ ( self ) -> List[str]:
        __lowerCamelCase : List[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        __lowerCamelCase : int = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy' )
        __lowerCamelCase : Optional[int] = 'stabilityai/stable-diffusion-x4-upscaler'
        __lowerCamelCase : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        __lowerCamelCase : str = 'a cat sitting on a park bench'
        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : int = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , )
        __lowerCamelCase : List[str] = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    # Memory ceiling with attention slicing + sequential CPU offload enabled.
    def lowercase_ ( self ) -> Optional[Any]:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __lowerCamelCase : List[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        __lowerCamelCase : Dict = 'stabilityai/stable-diffusion-x4-upscaler'
        __lowerCamelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        __lowerCamelCase : Union[str, Any] = 'a cat sitting on a park bench'
        __lowerCamelCase : Union[str, Any] = torch.manual_seed(0 )
        __lowerCamelCase : List[Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , output_type='np' , )
        __lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
13
# Package init for the Kandinsky pipelines: real implementations are exposed
# when torch + transformers are installed; otherwise the dummy placeholder
# classes (which raise an informative error on use) take their place.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # BUG FIX: only KandinskyPipeline and KandinskyPriorPipeline were pulled
    # from the dummy-objects module, so referencing KandinskyImgaImgPipeline
    # or KandinskyInpaintPipeline without the optional deps installed raised
    # NameError instead of the intended informative dummy error.
    from ...utils.dummy_torch_and_transformers_objects import (
        KandinskyImgaImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
    )
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
652
0
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = (DPMSolverSinglestepScheduler,) UpperCAmelCase__ : Any = (("num_inference_steps", 25),) def __lowercase ( self , **_a ) -> List[str]: _a : List[str] = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**_a ) return config def __lowercase ( self , _a=0 , **_a ) -> Any: _a : List[str] = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop('''num_inference_steps''' , _a ) _a : Dict = self.dummy_sample _a : Any = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _a : List[str] = self.get_scheduler_config(**_a ) _a : str = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals _a : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) _a : List[str] = scheduler_class.from_pretrained(_a ) new_scheduler.set_timesteps(_a ) # copy over dummy past residuals _a : int = dummy_past_residuals[: new_scheduler.config.solver_order] _a , _a : Any = sample, sample for t in range(_a , time_step + scheduler.config.solver_order + 1 ): _a : Dict = scheduler.step(_a , _a , _a , **_a ).prev_sample _a : List[str] = new_scheduler.step(_a , _a , _a , **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, 
"Scheduler outputs are not identical" def __lowercase ( self ) -> Dict: pass def __lowercase ( self , _a=0 , **_a ) -> List[Any]: _a : List[str] = dict(self.forward_default_kwargs ) _a : Union[str, Any] = kwargs.pop('''num_inference_steps''' , _a ) _a : Union[str, Any] = self.dummy_sample _a : List[str] = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _a : int = self.get_scheduler_config() _a : Optional[int] = scheduler_class(**_a ) scheduler.set_timesteps(_a ) # copy over dummy past residuals (must be after setting timesteps) _a : str = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_a ) _a : str = scheduler_class.from_pretrained(_a ) # copy over dummy past residuals new_scheduler.set_timesteps(_a ) # copy over dummy past residual (must be after setting timesteps) _a : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] _a : int = scheduler.step(_a , _a , _a , **_a ).prev_sample _a : int = new_scheduler.step(_a , _a , _a , **_a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowercase ( self , _a=None , **_a ) -> Dict: if scheduler is None: _a : Optional[Any] = self.scheduler_classes[0] _a : List[str] = self.get_scheduler_config(**_a ) _a : Union[str, Any] = scheduler_class(**_a ) _a : int = self.scheduler_classes[0] _a : List[Any] = self.get_scheduler_config(**_a ) _a : Union[str, Any] = scheduler_class(**_a ) _a : Optional[Any] = 1_0 _a : List[Any] = self.dummy_model() _a : Union[str, Any] = self.dummy_sample_deter scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): _a : Any = model(_a , _a ) _a : Dict = scheduler.step(_a , _a , _a ).prev_sample return sample def __lowercase ( self ) -> Dict: _a : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _a : Union[str, 
Any] = 5_0 _a : Any = self.dummy_model() _a : Tuple = self.dummy_sample_deter scheduler.set_timesteps(_a ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _a : List[Any] = model(_a , _a ) _a : Optional[int] = scheduler.step(_a , _a , _a ).prev_sample _a : Union[str, Any] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2574 ) < 1e-3 def __lowercase ( self ) -> int: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_a ) def __lowercase ( self ) -> List[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _a : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _a : Dict = self.full_loop(scheduler=_a ) _a : Optional[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2791 ) < 1e-3 _a : int = DEISMultistepScheduler.from_config(scheduler.config ) _a : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) _a : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) _a : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _a : Optional[int] = self.full_loop(scheduler=_a ) _a : List[str] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2791 ) < 1e-3 def __lowercase ( self ) -> Union[str, Any]: self.check_over_configs(thresholding=_a ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_a , prediction_type=_a , sample_max_value=_a , algorithm_type='''dpmsolver++''' , solver_order=_a , solver_type=_a , ) def __lowercase ( self ) -> Tuple: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def __lowercase ( self ) -> Dict: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for 
prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , ) _a : List[Any] = self.full_loop( solver_order=_a , solver_type=_a , prediction_type=_a , algorithm_type=_a , ) assert not torch.isnan(_a ).any(), "Samples have nan numbers" def __lowercase ( self ) -> List[Any]: self.check_over_configs(lower_order_final=_a ) self.check_over_configs(lower_order_final=_a ) def __lowercase ( self ) -> List[Any]: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def __lowercase ( self ) -> int: self.check_over_configs(variance_type=_a ) self.check_over_configs(variance_type='''learned_range''' ) def __lowercase ( self ) -> Optional[Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=_a , time_step=0 ) def __lowercase ( self ) -> Optional[Any]: _a : Any = self.full_loop() _a : Optional[Any] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2791 ) < 1e-3 def __lowercase ( self ) -> Optional[int]: _a : Optional[Any] = self.full_loop(use_karras_sigmas=_a ) _a : int = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.2248 ) < 1e-3 def __lowercase ( self ) -> Optional[Any]: _a : Dict = self.full_loop(prediction_type='''v_prediction''' ) _a : Optional[int] = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.1453 ) < 1e-3 def __lowercase ( self ) -> str: _a : List[str] = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_a ) _a : Tuple = torch.mean(torch.abs(_a ) ) assert abs(result_mean.item() - 0.0649 ) < 1e-3 def __lowercase ( self ) -> List[str]: _a : Union[str, Any] = self.scheduler_classes[0] _a : Dict = self.get_scheduler_config(thresholding=_a , dynamic_thresholding_ratio=0 ) _a : str = scheduler_class(**_a ) _a : Dict = 1_0 _a : Optional[Any] = self.dummy_model() _a : List[str] = 
self.dummy_sample_deter.half() scheduler.set_timesteps(_a ) for i, t in enumerate(scheduler.timesteps ): _a : Any = model(_a , _a ) _a : int = scheduler.step(_a , _a , _a ).prev_sample assert sample.dtype == torch.floataa
14
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a =logging.get_logger(__name__) # General docstring a ="""RegNetConfig""" # Base docstring a ="""facebook/regnet-y-040""" a =[1, 1088, 7, 7] # Image classification docstring a ="""facebook/regnet-y-040""" a ="""tabby, tabby cat""" a =[ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : Optional[str] = "relu" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,): super().__init__(**SCREAMING_SNAKE_CASE__) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __lowerCamelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2) __lowerCamelCase : Union[str, Any] = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,strides=SCREAMING_SNAKE_CASE__ ,padding='VALID' ,groups=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution' ,) __lowerCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization') __lowerCamelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase ( 
self : Any ,SCREAMING_SNAKE_CASE__ : List[str]): __lowerCamelCase : List[Any] = self.convolution(self.padding(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : Union[str, Any] = self.normalization(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = self.activation(SCREAMING_SNAKE_CASE__) return hidden_state class A_ ( tf.keras.layers.Layer ): def __init__( self : str ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,**SCREAMING_SNAKE_CASE__ : Dict): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = config.num_channels __lowerCamelCase : Dict = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='embedder' ,) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Optional[int] = shape_list(SCREAMING_SNAKE_CASE__)[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.') # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) __lowerCamelCase : Optional[int] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 2, 3, 1)) __lowerCamelCase : List[Any] = self.embedder(SCREAMING_SNAKE_CASE__) return hidden_state class A_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 2 ,**SCREAMING_SNAKE_CASE__ : Tuple): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = tf.keras.layers.ConvaD( filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,strides=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution') __lowerCamelCase : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization') def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : bool = False): return self.normalization(self.convolution(SCREAMING_SNAKE_CASE__) ,training=SCREAMING_SNAKE_CASE__) class A_ ( tf.keras.layers.Layer ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Any): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler') __lowerCamelCase : Dict = [ tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation='relu' ,name='attention.0'), tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation='sigmoid' ,name='attention.2'), ] def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] __lowerCamelCase : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE__) for layer_module in self.attention: __lowerCamelCase : Any = layer_module(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = hidden_state * pooled return hidden_state class A_ ( tf.keras.layers.Layer ): def __init__( self : 
Union[str, Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : List[Any]): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = in_channels != out_channels or stride != 1 __lowerCamelCase : Union[str, Any] = max(1 ,out_channels // config.groups_width) __lowerCamelCase : Dict = ( TFRegNetShortCut(SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='shortcut') if should_apply_shortcut else tf.keras.layers.Activation('linear' ,name='shortcut') ) # `self.layers` instead of `self.layer` because that is a reserved argument. __lowerCamelCase : Optional[int] = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0'), TFRegNetConvLayer( SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,activation=config.hidden_act ,name='layer.1'), TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=SCREAMING_SNAKE_CASE__ ,name='layer.2'), ] __lowerCamelCase : Dict = ACTaFN[config.hidden_act] def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : int = hidden_state for layer_module in self.layers: __lowerCamelCase : List[str] = layer_module(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE__) hidden_state += residual __lowerCamelCase : int = self.activation(SCREAMING_SNAKE_CASE__) return hidden_state class A_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : List[str]): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = in_channels != out_channels or stride != 1 __lowerCamelCase : Tuple = max(1 ,out_channels // config.groups_width) __lowerCamelCase : int = ( 
TFRegNetShortCut(SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='shortcut') if should_apply_shortcut else tf.keras.layers.Activation('linear' ,name='shortcut') ) __lowerCamelCase : Optional[int] = [ TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0'), TFRegNetConvLayer( SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,activation=config.hidden_act ,name='layer.1'), TFRegNetSELayer(SCREAMING_SNAKE_CASE__ ,reduced_channels=int(round(in_channels / 4)) ,name='layer.2'), TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=SCREAMING_SNAKE_CASE__ ,name='layer.3'), ] __lowerCamelCase : List[Any] = ACTaFN[config.hidden_act] def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Optional[int] = hidden_state for layer_module in self.layers: __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = self.shortcut(SCREAMING_SNAKE_CASE__) hidden_state += residual __lowerCamelCase : Any = self.activation(SCREAMING_SNAKE_CASE__) return hidden_state class A_ ( tf.keras.layers.Layer ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,**SCREAMING_SNAKE_CASE__ : Tuple): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer __lowerCamelCase : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='layers.0'), *[layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,name=F"layers.{i+1}") for i in range(depth - 1)], ] def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]): for layer_module in self.layers: __lowerCamelCase : Any = 
layer_module(SCREAMING_SNAKE_CASE__) return hidden_state class A_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,**SCREAMING_SNAKE_CASE__ : Any): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( SCREAMING_SNAKE_CASE__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='stages.0' ,)) __lowerCamelCase : Optional[int] = zip(config.hidden_sizes ,config.hidden_sizes[1:]) for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE__ ,config.depths[1:])): self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,depth=SCREAMING_SNAKE_CASE__ ,name=F"stages.{i+1}")) def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True): __lowerCamelCase : Optional[Any] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowerCamelCase : Optional[Any] = hidden_states + (hidden_state,) __lowerCamelCase : str = stage_module(SCREAMING_SNAKE_CASE__) if output_hidden_states: __lowerCamelCase : Tuple = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__) @keras_serializable class A_ ( tf.keras.layers.Layer ): _UpperCAmelCase : List[Any] = RegNetConfig def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = config __lowerCamelCase : Optional[int] = 
TFRegNetEmbeddings(SCREAMING_SNAKE_CASE__ ,name='embedder') __lowerCamelCase : Union[str, Any] = TFRegNetEncoder(SCREAMING_SNAKE_CASE__ ,name='encoder') __lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler') @unpack_inputs def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,): __lowerCamelCase : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = self.encoder( SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = encoder_outputs[0] __lowerCamelCase : int = self.pooler(SCREAMING_SNAKE_CASE__) # Change to NCHW output format have uniformity in the modules __lowerCamelCase : Union[str, Any] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2)) __lowerCamelCase : str = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2)) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __lowerCamelCase : Union[str, Any] = tuple([tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Any = RegNetConfig _UpperCAmelCase : Optional[int] = 
'''regnet''' _UpperCAmelCase : List[Any] = '''pixel_values''' @property def lowerCAmelCase ( self : int): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.floataa)} a =r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ a =r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Tuple): super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet') @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : int=False ,): __lowerCamelCase : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Tuple = self.regnet( pixel_values=SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__ ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : str): super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = config.num_labels __lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet') # classification head __lowerCamelCase : Optional[Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='classifier.1') if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Any=False ,): __lowerCamelCase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : str = self.regnet( SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1] __lowerCamelCase : Optional[Any] = self.classifier[0](SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = self.classifier[1](SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__) if not return_dict: __lowerCamelCase : Union[str, Any] = (logits,) 
+ outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states)
652
0
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def UpperCamelCase ( __magic_name__ : Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]: """simple docstring""" lowercase__ = [] if isinstance(__magic_name__ , __magic_name__ ): for v in tree.values(): shapes.extend(_fetch_dims(__magic_name__ ) ) elif isinstance(__magic_name__ , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(__magic_name__ ) ) elif isinstance(__magic_name__ , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("""Not supported""" ) return shapes @torch.jit.ignore def UpperCamelCase ( __magic_name__ : int , __magic_name__ : Tuple[int, ...] ) -> Tuple[int, ...]: """simple docstring""" lowercase__ = [] for d in reversed(__magic_name__ ): idx.append(flat_idx % d ) lowercase__ = flat_idx // d return tuple(reversed(__magic_name__ ) ) @torch.jit.ignore def UpperCamelCase ( __magic_name__ : Sequence[int] , __magic_name__ : Sequence[int] , __magic_name__ : Sequence[int] , __magic_name__ : Optional[Sequence[bool]] = None , __magic_name__ : Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]: """simple docstring""" def reduce_edge_list(__magic_name__ : List[bool] ) -> None: lowercase__ = True for i in range(len(__magic_name__ ) ): lowercase__ = -1 * (i + 1) l[reversed_idx] &= tally lowercase__ = l[reversed_idx] if start_edges is None: lowercase__ = [s == 0 for s in start] reduce_edge_list(__magic_name__ ) if end_edges is None: lowercase__ = [e == (d - 1) for e, d in zip(__magic_name__ , __magic_name__ )] reduce_edge_list(__magic_name__ ) # Base cases. 
Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(__magic_name__ ) == 0: return [()] elif len(__magic_name__ ) == 1: return [(slice(start[0] , end[0] + 1 ),)] lowercase__ = [] lowercase__ = [] # Dimensions common to start and end can be selected directly for s, e in zip(__magic_name__ , __magic_name__ ): if s == e: path_list.append(slice(__magic_name__ , s + 1 ) ) else: break lowercase__ = tuple(__magic_name__ ) lowercase__ = len(__magic_name__ ) # start == end, and we're done if divergence_idx == len(__magic_name__ ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None lowercase__ = start[divergence_idx] return tuple( path + (slice(__magic_name__ , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None lowercase__ = end[divergence_idx] return tuple( path + (slice(__magic_name__ , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , 
end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) lowercase__ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def UpperCamelCase ( __magic_name__ : torch.Tensor , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> torch.Tensor: """simple docstring""" lowercase__ = t.shape[:no_batch_dims] lowercase__ = list(_flat_idx_to_idx(__magic_name__ , __magic_name__ ) ) # _get_minimal_slice_set is inclusive lowercase__ = list(_flat_idx_to_idx(flat_end - 1 , __magic_name__ ) ) # Get an ordered list of slices to perform lowercase__ = _get_minimal_slice_set( __magic_name__ , __magic_name__ , __magic_name__ , ) lowercase__ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def UpperCamelCase ( __magic_name__ : Callable , __magic_name__ : Dict[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : bool = False , __magic_name__ : Any = None , __magic_name__ : bool = False , ) -> Any: """simple docstring""" if not (len(__magic_name__ ) > 0): raise ValueError("""Must provide at least one input""" ) lowercase__ = [shape[:no_batch_dims] for shape in _fetch_dims(__magic_name__ )] lowercase__ = tuple([max(__magic_name__ ) for s in zip(*__magic_name__ )] ) def _prep_inputs(__magic_name__ : torch.Tensor ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: lowercase__ = t.expand(orig_batch_dims + 
t.shape[no_batch_dims:] ) lowercase__ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t lowercase__ = tensor_tree_map(_prep_inputs , __magic_name__ ) lowercase__ = None if _out is not None: lowercase__ = tensor_tree_map(lambda __magic_name__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) lowercase__ = 1 for d in orig_batch_dims: flat_batch_dim *= d lowercase__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(__magic_name__ : torch.Tensor ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t lowercase__ = 0 lowercase__ = prepped_outputs for _ in range(__magic_name__ ): # Chunk the input if not low_mem: lowercase__ = _select_chunk else: lowercase__ = partial( _chunk_slice , flat_start=__magic_name__ , flat_end=min(__magic_name__ , i + chunk_size ) , no_batch_dims=len(__magic_name__ ) , ) lowercase__ = tensor_tree_map(__magic_name__ , __magic_name__ ) # Run the layer on the chunk lowercase__ = layer(**__magic_name__ ) # Allocate space for the output if out is None: lowercase__ = tensor_tree_map(lambda __magic_name__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __magic_name__ ) # Put the chunk in its pre-allocated space if isinstance(__magic_name__ , __magic_name__ ): def assign(__magic_name__ : dict , __magic_name__ : dict ) -> None: for k, v in da.items(): if isinstance(__magic_name__ , __magic_name__ ): assign(__magic_name__ , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: lowercase__ = da[k] assign(__magic_name__ , __magic_name__ ) elif isinstance(__magic_name__ , __magic_name__ ): for xa, xa in zip(__magic_name__ , __magic_name__ ): if _add_into_out: xa[i : i + chunk_size] += xa else: lowercase__ = xa elif isinstance(__magic_name__ , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: lowercase__ = output_chunk else: raise ValueError("""Not supported""" ) i += 
chunk_size lowercase__ = tensor_tree_map(lambda __magic_name__ : t.view(orig_batch_dims + t.shape[1:] ) , __magic_name__ ) return out class A : '''simple docstring''' def __init__(self : List[str] , _UpperCAmelCase : int = 512 , ) -> str: """simple docstring""" lowercase__ = max_chunk_size lowercase__ = None lowercase__ = None def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Callable , _UpperCAmelCase : tuple , _UpperCAmelCase : int ) -> int: """simple docstring""" logging.info("""Tuning chunk size...""" ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size lowercase__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] lowercase__ = [c for c in candidates if c > min_chunk_size] lowercase__ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(_UpperCAmelCase : int ) -> bool: try: with torch.no_grad(): fn(*_UpperCAmelCase , chunk_size=_UpperCAmelCase ) return True except RuntimeError: return False lowercase__ = 0 lowercase__ = len(_UpperCAmelCase ) - 1 while i > min_viable_chunk_size_index: lowercase__ = test_chunk_size(candidates[i] ) if not viable: lowercase__ = (min_viable_chunk_size_index + i) // 2 else: lowercase__ = i lowercase__ = (i + len(_UpperCAmelCase ) - 1) // 2 return candidates[min_viable_chunk_size_index] def lowerCamelCase__ (self : Dict , _UpperCAmelCase : Iterable , _UpperCAmelCase : Iterable ) -> bool: """simple docstring""" lowercase__ = True for aa, aa in zip(_UpperCAmelCase , _UpperCAmelCase ): assert type(_UpperCAmelCase ) == type(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , (list, tuple) ): consistent &= self._compare_arg_caches(_UpperCAmelCase , _UpperCAmelCase ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _UpperCAmelCase : x[0] )] lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _UpperCAmelCase : x[0] )] consistent &= self._compare_arg_caches(_UpperCAmelCase , _UpperCAmelCase ) else: consistent &= 
aa == aa return consistent def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Callable , _UpperCAmelCase : tuple , _UpperCAmelCase : int , ) -> int: """simple docstring""" lowercase__ = True lowercase__ = tree_map(lambda _UpperCAmelCase : a.shape if isinstance(_UpperCAmelCase , torch.Tensor ) else a , _UpperCAmelCase , _UpperCAmelCase ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(_UpperCAmelCase ) lowercase__ = self._compare_arg_caches(self.cached_arg_data , _UpperCAmelCase ) else: # Otherwise, we can reuse the precomputed value lowercase__ = False if not consistent: lowercase__ = self._determine_favorable_chunk_size( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) lowercase__ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
15
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels a =object() # For specifying empty leaf dict `{}` a =object() def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int: __lowerCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) ) for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ): __lowerCamelCase : str = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )] if matches and all(lowerCamelCase__ ): return True return False def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: def replace(lowerCamelCase__ , lowerCamelCase__ ): for rule, replacement in rules: if _match(lowerCamelCase__ , lowerCamelCase__ ): return replacement return val return replace def SCREAMING_SNAKE_CASE__ ( ) -> str: return [ # embeddings (("transformer", "wpe", "embedding"), P('mp' , lowerCamelCase__ )), (("transformer", "wte", "embedding"), P('mp' , lowerCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase__ , 'mp' )), (("attention", "out_proj", "kernel"), P('mp' , lowerCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(lowerCamelCase__ , 'mp' )), (("mlp", "c_fc", "bias"), P('mp' )), (("mlp", "c_proj", "kernel"), P('mp' , lowerCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: __lowerCamelCase : List[str] = _get_partition_rules() __lowerCamelCase : Optional[Any] = _replacement_rules(lowerCamelCase__ ) __lowerCamelCase : Tuple = {k: _unmatched for k in flatten_dict(lowerCamelCase__ )} __lowerCamelCase : List[Any] = {k: replace(lowerCamelCase__ , lowerCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), 
"Incomplete partition spec." return freeze(unflatten_dict(lowerCamelCase__ ) )
652
0
from string import ascii_lowercase, ascii_uppercase def __a ( A__ : str ): if not sentence: return "" SCREAMING_SNAKE_CASE = dict(zip(A__ , A__ ) ) return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:] if __name__ == "__main__": from doctest import testmod testmod()
16
import math def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list: __lowerCamelCase : Union[str, Any] = [True] * n __lowerCamelCase : List[Any] = False __lowerCamelCase : int = False __lowerCamelCase : Any = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): __lowerCamelCase : List[str] = i * 2 while index < n: __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[Any] = index + i __lowerCamelCase : Optional[Any] = [2] for i in range(3 , lowerCamelCase__ , 2 ): if is_prime[i]: primes.append(lowerCamelCase__ ) return primes def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 9_9_9_9_6_6_6_6_3_3_3_3 ) -> int: __lowerCamelCase : Optional[Any] = math.floor(math.sqrt(lowerCamelCase__ ) ) + 1_0_0 __lowerCamelCase : Dict = prime_sieve(lowerCamelCase__ ) __lowerCamelCase : Tuple = 0 __lowerCamelCase : Dict = 0 __lowerCamelCase : Any = primes[prime_index] while (last_prime**2) <= limit: __lowerCamelCase : Any = primes[prime_index + 1] __lowerCamelCase : Optional[Any] = last_prime**2 __lowerCamelCase : Dict = next_prime**2 # Get numbers divisible by lps(current) __lowerCamelCase : Tuple = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __lowerCamelCase : Any = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __lowerCamelCase : List[Any] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __lowerCamelCase : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": 
print(solution())
652
0
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : str = logging.get_logger(__name__) def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> List[Any]: __A : Tuple = RobertaPreLayerNormConfig.from_pretrained( a__ ,architectures=["""RobertaPreLayerNormForMaskedLM"""] ) # convert state_dict __A : List[str] = torch.load(hf_hub_download(repo_id=a__ ,filename="""pytorch_model.bin""" ) ) __A : Dict = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("""roberta.""" ): __A : Optional[Any] = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ): continue __A : List[Any] = tensor_value __A : List[Any] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=a__ ,config=a__ ,state_dict=a__ ) model.save_pretrained(a__ ) # convert tokenizer __A : int = AutoTokenizer.from_pretrained(a__ ) tokenizer.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCAmelCase_ : Dict = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
17
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : torch.FloatTensor _UpperCAmelCase : torch.FloatTensor class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Dict = 1 @register_to_config def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int = 2_0_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.15 ,SCREAMING_SNAKE_CASE__ : float = 0.01 ,SCREAMING_SNAKE_CASE__ : float = 1348.0 ,SCREAMING_SNAKE_CASE__ : float = 1E-5 ,SCREAMING_SNAKE_CASE__ : int = 1 ,): # standard deviation of the initial noise distribution __lowerCamelCase : int = sigma_max # setable values __lowerCamelCase : List[str] = None self.set_sigmas(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[int] = None): return sample def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None): __lowerCamelCase : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowerCamelCase : Optional[int] = torch.linspace(1 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None): __lowerCamelCase : Optional[int] = sigma_min if sigma_min is not None else self.config.sigma_min __lowerCamelCase : Optional[int] = sigma_max if sigma_max is 
not None else self.config.sigma_max __lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowerCamelCase : Optional[Any] = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE__) ,math.log(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)) __lowerCamelCase : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str]): return torch.where( timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') __lowerCamelCase : List[str] = timestep * torch.ones( sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0]) __lowerCamelCase : str = (timestep * (len(self.timesteps) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowerCamelCase : Dict = timesteps.to(self.discrete_sigmas.device) __lowerCamelCase : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device) __lowerCamelCase : Optional[Any] = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).to(sample.device) __lowerCamelCase : int = torch.zeros_like(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the 
model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowerCamelCase : int = diffusion.flatten() while len(diffusion.shape) < len(sample.shape): __lowerCamelCase : List[Any] = diffusion.unsqueeze(-1) __lowerCamelCase : Any = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowerCamelCase : int = randn_tensor( sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__ ,device=sample.device ,dtype=sample.dtype) __lowerCamelCase : Optional[int] = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowerCamelCase : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ ,prev_sample_mean=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" # sample noise for correction __lowerCamelCase : Optional[int] = randn_tensor(sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__).to(sample.device) # compute step size from the model_output, the noise, and the snr __lowerCamelCase : str = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean() __lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean() __lowerCamelCase : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowerCamelCase : Optional[int] = step_size * torch.ones(sample.shape[0]).to(sample.device) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowerCamelCase : Union[str, Any] = step_size.flatten() while len(step_size.shape) < len(sample.shape): __lowerCamelCase : List[str] = step_size.unsqueeze(-1) __lowerCamelCase : str = sample + step_size * model_output __lowerCamelCase : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowerCamelCase : int = timesteps.to(original_samples.device) __lowerCamelCase : Any = self.discrete_sigmas.to(original_samples.device)[timesteps] __lowerCamelCase : Optional[Any] = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(SCREAMING_SNAKE_CASE__) * sigmas[:, None, None, None] ) __lowerCamelCase : str = noise + original_samples return noisy_samples def __len__( self : Optional[int]): return self.config.num_train_timesteps
652
0
'''simple docstring''' from sklearn.metrics import fa_score import datasets _SCREAMING_SNAKE_CASE = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" _SCREAMING_SNAKE_CASE = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, 
average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" _SCREAMING_SNAKE_CASE = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): def _snake_case ( self ) -> Dict: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=1 , _lowerCAmelCase="binary" , _lowerCAmelCase=None ) -> Optional[Any]: _lowerCAmelCase = fa_score( _lowerCAmelCase , _lowerCAmelCase , labels=_lowerCAmelCase , pos_label=_lowerCAmelCase , average=_lowerCAmelCase , sample_weight=_lowerCAmelCase ) return {"f1": float(_lowerCAmelCase ) if score.size == 1 else score}
18
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> np.ndarray: return input_array.reshape((input_array.size, 1) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : str = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : int = features[:, labels == i] __lowerCamelCase : Optional[int] = data.mean(1 ) # Centralize the data of class i __lowerCamelCase : int = data - column_reshape(lowerCamelCase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowerCamelCase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) __lowerCamelCase : Union[str, Any] = np.dot(lowerCamelCase__ , centered_data.T ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = features.mean(1 ) __lowerCamelCase : Union[str, Any] = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = features[:, labels == i] __lowerCamelCase : Union[str, Any] = data.shape[1] __lowerCamelCase : Union[str, Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) __lowerCamelCase : List[str] = device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: # Check if the features have been loaded if features.any(): __lowerCamelCase : Tuple = features.mean(1 ) # Center the dataset __lowerCamelCase : Any = features - np.reshape(lowerCamelCase__ , (data_mean.size, 1) ) __lowerCamelCase : Optional[int] = np.dot(lowerCamelCase__ , centered_data.T ) / features.shape[1] __lowerCamelCase , __lowerCamelCase : List[Any] = np.linalg.eigh(lowerCamelCase__ ) # Take all the columns in the reverse order (-1), and then takes only the first __lowerCamelCase : Dict = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space __lowerCamelCase : int = np.dot(filtered_eigenvectors.T , lowerCamelCase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: assert classes > dimensions # Check if features have been already loaded if features.any: __lowerCamelCase , __lowerCamelCase : Dict = eigh( covariance_between_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , covariance_within_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , ) __lowerCamelCase : Union[str, Any] = eigenvectors[:, ::-1][:, :dimensions] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = np.linalg.svd(lowerCamelCase__ ) __lowerCamelCase : int = svd_matrix[:, 0:dimensions] __lowerCamelCase : Optional[int] = np.dot(filtered_svd_matrix.T , lowerCamelCase__ ) logging.info('Linear 
Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: # Create dummy dataset with 2 classes and 3 features __lowerCamelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) __lowerCamelCase : Optional[int] = np.array([0, 0, 0, 1, 1] ) __lowerCamelCase : Optional[Any] = 2 __lowerCamelCase : Tuple = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : int = linear_discriminant_analysis( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if isinstance(lowerCamelCase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) __lowerCamelCase : Dict = 2 __lowerCamelCase : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : Optional[Any] = principal_component_analysis(lowerCamelCase__ , lowerCamelCase__ ) if not np.allclose(lowerCamelCase__ , lowerCamelCase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
652
0
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = BertTokenizer lowercase__ = BertTokenizerFast lowercase__ = True lowercase__ = True lowercase__ = filter_non_english def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' super().setUp() _UpperCamelCase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) def UpperCAmelCase ( self , __a) -> str: '''simple docstring''' _UpperCamelCase = '''UNwant\u00E9d,running''' _UpperCamelCase = '''unwanted, running''' return input_text, output_text def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = self.tokenizer_class(self.vocab_file) _UpperCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''') self.assertListEqual(__a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , [9, 6, 7, 12, 10, 11]) def UpperCAmelCase ( self) -> int: '''simple docstring''' if not self.test_rust_tokenizer: return _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = '''UNwant\u00E9d,running''' _UpperCamelCase = tokenizer.tokenize(__a) _UpperCamelCase = 
rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = tokenizer.encode(__a) _UpperCamelCase = rust_tokenizer.encode(__a) self.assertListEqual(__a , __a) # With lower casing _UpperCamelCase = self.get_tokenizer(do_lower_case=__a) _UpperCamelCase = self.get_rust_tokenizer(do_lower_case=__a) _UpperCamelCase = '''UNwant\u00E9d,running''' _UpperCamelCase = tokenizer.tokenize(__a) _UpperCamelCase = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = tokenizer.encode(__a) _UpperCamelCase = rust_tokenizer.encode(__a) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') , ['''ah''', '''\u535A''', '''\u63A8''', '''zz''']) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello''']) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? 
''') , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''h\u00E9llo''']) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello''']) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?''']) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''hello''']) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a , strip_accents=__a) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''']) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = BasicTokenizer(do_lower_case=__a , never_split=['''[UNK]''']) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''') , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]''']) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = BasicTokenizer() _UpperCamelCase = '''a\n\'ll !!to?\'d of, can\'t.''' _UpperCamelCase = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.'''] self.assertListEqual(tokenizer.tokenize(__a) , __a) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] _UpperCamelCase = {} for i, token in enumerate(__a): _UpperCamelCase = i _UpperCamelCase = WordpieceTokenizer(vocab=__a , unk_token='''[UNK]''') self.assertListEqual(tokenizer.tokenize('''''') , []) self.assertListEqual(tokenizer.tokenize('''unwanted running''') , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing''']) self.assertListEqual(tokenizer.tokenize('''unwantedX running''') , ['''[UNK]''', '''runn''', '''##ing''']) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_whitespace(''' ''')) self.assertTrue(_is_whitespace('''\t''')) self.assertTrue(_is_whitespace('''\r''')) self.assertTrue(_is_whitespace('''\n''')) self.assertTrue(_is_whitespace('''\u00A0''')) self.assertFalse(_is_whitespace('''A''')) self.assertFalse(_is_whitespace('''-''')) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' self.assertTrue(_is_control('''\u0005''')) self.assertFalse(_is_control('''A''')) self.assertFalse(_is_control(''' ''')) self.assertFalse(_is_control('''\t''')) self.assertFalse(_is_control('''\r''')) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' self.assertTrue(_is_punctuation('''-''')) self.assertTrue(_is_punctuation('''$''')) self.assertTrue(_is_punctuation('''`''')) self.assertTrue(_is_punctuation('''.''')) self.assertFalse(_is_punctuation('''A''')) 
self.assertFalse(_is_punctuation(''' ''')) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__a) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']]) self.assertListEqual( [rust_tokenizer.tokenize(__a) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']]) @slow def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.tokenizer_class.from_pretrained('''bert-base-uncased''') _UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__a) _UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__a) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a , __a) assert encoded_sentence == [1_01] + text + [1_02] assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02] def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a) _UpperCamelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _UpperCamelCase = tokenizer_r.encode_plus( __a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , ) _UpperCamelCase = tokenizer_r.do_lower_case if hasattr(__a , '''do_lower_case''') else False _UpperCamelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 
33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''])) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping''']) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ['''的''', '''人''', '''有'''] _UpperCamelCase = ''''''.join(__a) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''): _UpperCamelCase = True _UpperCamelCase = self.tokenizer_class.from_pretrained(__a , **__a) _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a) _UpperCamelCase = tokenizer_p.encode(__a , add_special_tokens=__a) _UpperCamelCase = tokenizer_r.encode(__a , add_special_tokens=__a) _UpperCamelCase = tokenizer_r.convert_ids_to_tokens(__a) _UpperCamelCase = tokenizer_p.convert_ids_to_tokens(__a) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__a , __a) self.assertListEqual(__a , __a) _UpperCamelCase = False _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a) _UpperCamelCase = self.tokenizer_class.from_pretrained(__a , **__a) _UpperCamelCase = tokenizer_r.encode(__a , add_special_tokens=__a) _UpperCamelCase = tokenizer_p.encode(__a , add_special_tokens=__a) _UpperCamelCase = tokenizer_r.convert_ids_to_tokens(__a) _UpperCamelCase = tokenizer_p.convert_ids_to_tokens(__a) # it is expected that only the first Chinese character is not preceded by "##". 
_UpperCamelCase = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__a) ] self.assertListEqual(__a , __a) self.assertListEqual(__a , __a)
19
# NOTE(review): doc-test driver for the transformers repo.  `analyze_directory` walks a
# directory, filters files by `identifier` / `n_identifier` / `ignore_files`, then either
# imports each file as a module and runs `doctest.DocTestSuite` on it (`only_modules`) or
# runs `doctest.testfile`.  The remaining methods invoke it over src/transformers
# (modeling / tokenization / configuration) and docs/source.  The whole class is skipped
# via @unittest.skip and gated by @require_torch @require_tf @slow.
# Stored with hard line breaks inserted mid-statement (corpus artifact), so only this
# header comment is added; the code below is byte-identical to the original.
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow a =logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Path ,SCREAMING_SNAKE_CASE__ : Union[str, None] = None ,SCREAMING_SNAKE_CASE__ : Union[List[str], None] = None ,SCREAMING_SNAKE_CASE__ : Union[str, List[str], None] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,): __lowerCamelCase : List[str] = [file for file in os.listdir(SCREAMING_SNAKE_CASE__) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))] if identifier is not None: __lowerCamelCase : str = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__): for n_ in n_identifier: __lowerCamelCase : Optional[int] = [file for file in files if n_ not in file] else: __lowerCamelCase : Dict = [file for file in files if n_identifier not in file] __lowerCamelCase : str = ignore_files or [] ignore_files.append('__init__.py') __lowerCamelCase : Tuple = [file for file in files if file not in ignore_files] for file in files: # Open all files print('Testing' ,SCREAMING_SNAKE_CASE__) if only_modules: __lowerCamelCase : Optional[int] = file.split('.')[0] try: __lowerCamelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = doctest.DocTestSuite(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE__) self.assertIs(len(result.failures) ,0) except AttributeError: logger.info(F"{module_identifier} is not a module.") else: __lowerCamelCase : int = doctest.testfile(str('..' 
/ directory / file) ,optionflags=doctest.ELLIPSIS) self.assertIs(result.failed ,0) def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : Dict = Path('src/transformers') __lowerCamelCase : Any = 'modeling' __lowerCamelCase : Dict = [ 'modeling_ctrl.py', 'modeling_tf_ctrl.py', ] self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Tuple = Path('src/transformers') __lowerCamelCase : Optional[int] = 'tokenization' self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple): __lowerCamelCase : List[Any] = Path('src/transformers') __lowerCamelCase : str = 'configuration' self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int): __lowerCamelCase : Dict = Path('src/transformers') __lowerCamelCase : Any = ['configuration', 'modeling', 'tokenization'] self.analyze_directory(SCREAMING_SNAKE_CASE__ ,n_identifier=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int): __lowerCamelCase : List[Any] = Path('docs/source') __lowerCamelCase : str = ['favicon.ico'] self.analyze_directory(SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__ ,only_modules=SCREAMING_SNAKE_CASE__)
652
0
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original LUKE checkpoint into a Transformers ``LukeModel`` + ``LukeTokenizer``.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` defining the model configuration.
        entity_vocab_path: path to the ``entity_vocab.tsv`` entity vocabulary.
        pytorch_dump_folder_path: output directory for the converted model/tokenizer.
        model_size: ``"base"`` or ``"large"`` — selects the expected verification shapes.

    Raises:
        ValueError: if loading the state dict yields unexpected missing/unexpected keys,
            or if the verification forward pass does not match the expected outputs.
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    # BUG FIX: the stored source defined this helper under a mangled name
    # (`_lowercase`) while calling `load_entity_vocab`, a NameError at runtime.
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_two = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_one, entity_token_two]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens: reuse the "@" and "#"
    # word embeddings for <ent> and <ent2> respectively.
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, enta_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (word->entity, entity->word, entity->entity queries all start from the word query).
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    # NOTE(review): the assignment target was lost in the stored source; [MASK2] matches
    # the upstream conversion script — confirm against the original repo.
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: the original wrote `if not (... != expected_shape)`, which raised
    # exactly when the shape WAS correct and silently accepted wrong shapes.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocabulary (``title<TAB>count`` per line) into {title: line_index}."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
20
# NOTE(review): BartPho (syllable) tokenizer — a SentencePiece BPE model combined with a
# reduced monolingual vocabulary loaded from `dict.txt` (`fairseq_tokens_to_ids`).
# Visible behavior: special-token setup, building inputs/masks/token-type-ids with
# <s>/</s> framing, id<->token conversion through the fairseq maps (unk fallback),
# and `save_vocabulary` copying/serializing both vocab files.  Pickling drops the live
# `sp_model` and restores it from `sp_model_proto` in __setstate__.
# Stored with hard line breaks inserted mid-statement (corpus artifact), so only this
# header comment is added; the code below is byte-identical to the original.
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ="""▁""" a ={"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} a ={ """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } a ={"""vinai/bartpho-syllable""": 1024} class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES _UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : Dict="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,): # Mask token behave like a normal word, i.e. 
include the space before it __lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token __lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : int = vocab_file __lowerCamelCase : Tuple = monolingual_vocab_file __lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__)) # Load the reduced vocab # Keep order of special tokens for backward compatibility __lowerCamelCase : Optional[int] = {} __lowerCamelCase : List[Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids: __lowerCamelCase : Any = cnt cnt += 1 with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f: for line in f.readlines(): __lowerCamelCase : Any = line.strip().split()[0] __lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids) if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids: __lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids) __lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int): __lowerCamelCase : Tuple = self.__dict__.copy() __lowerCamelCase : Optional[int] = None __lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : List[str] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs'): 
__lowerCamelCase : str = {} __lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase : Tuple = [self.cls_token_id] __lowerCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): __lowerCamelCase : Dict = [self.sep_token_id] __lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def lowerCAmelCase ( self : List[str]): return len(self.fairseq_ids_to_tokens) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str): return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): if token in self.fairseq_tokens_to_ids: 
return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict): return self.fairseq_ids_to_tokens[index] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Any = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip() return out_string def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if not os.path.isdir(SCREAMING_SNAKE_CASE__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi: __lowerCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( SCREAMING_SNAKE_CASE__) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.monolingual_vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"{str(SCREAMING_SNAKE_CASE__)} \n") return out_vocab_file, out_monolingual_vocab_file
652
0
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowerCAmelCase_ ( lowerCamelCase ): # A local function to see if a dot lands in the circle. def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool: __magic_name__ : Dict =sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle __magic_name__ : Union[str, Any] =mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(lowerCamelCase ) ) # The ratio of the area for circle to square is pi/4. __magic_name__ : List[Any] =proportion * 4 print(F"The estimated value of pi is {pi_estimate}" ) print(F"The numpy value of pi is {pi}" ) print(F"The total error is {abs(pi - pi_estimate )}" ) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ): return mean( function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ): def identity_function(lowerCamelCase ) -> float: return x __magic_name__ : Optional[int] =area_under_curve_estimator( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) __magic_name__ : str =(max_value * max_value - min_value * min_value) / 2 print("""******************""" ) print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" ) print(F"Estimated value is {estimated_value}" ) print(F"Expected value is {expected_value}" ) print(F"Total error is {abs(estimated_value - expected_value )}" ) print("""******************""" ) def lowerCAmelCase_ ( lowerCamelCase ): def function_to_integrate(lowerCamelCase ) -> float: return sqrt(4.0 - x * x ) __magic_name__ : Dict =area_under_curve_estimator( lowerCamelCase , lowerCamelCase , 0.0 , 
2.0 ) print("""******************""" ) print("""Estimating pi using area_under_curve_estimator""" ) print(F"Estimated value is {estimated_value}" ) print(F"Expected value is {pi}" ) print(F"Total error is {abs(estimated_value - pi )}" ) print("""******************""" ) if __name__ == "__main__": import doctest doctest.testmod()
21
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=1_3 ,SCREAMING_SNAKE_CASE__ : int=3_0 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : List[str]=3_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,): __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Optional[Any] = batch_size __lowerCamelCase : Dict = image_size __lowerCamelCase : Optional[Any] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : str = is_training __lowerCamelCase : List[Any] = use_labels __lowerCamelCase : Any = hidden_size __lowerCamelCase : Optional[int] = 
num_hidden_layers __lowerCamelCase : Any = num_attention_heads __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : Dict = hidden_act __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : List[Any] = attention_probs_dropout_prob __lowerCamelCase : Dict = type_sequence_label_size __lowerCamelCase : Optional[Any] = initializer_range __lowerCamelCase : List[str] = scope __lowerCamelCase : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : str = num_patches + 2 def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __lowerCamelCase : List[Any] = None if self.use_labels: __lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : List[Any]): return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict): __lowerCamelCase : Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size)) def 
lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any): __lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __lowerCamelCase : int = 1 __lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size)) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): __lowerCamelCase : Dict = self.type_sequence_label_size __lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) # test greyscale images __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Tuple = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf 
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _UpperCAmelCase : List[Any] = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[int] = False def lowerCAmelCase ( self : Any): __lowerCamelCase : str = TFDeiTModelTester(self) __lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7) def lowerCAmelCase ( self : str): self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Dict): __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer)) __lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense)) def lowerCAmelCase ( self : List[Any]): __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Any = [*signature.parameters.keys()] __lowerCamelCase : Union[str, Any] = 
['pixel_values'] self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str=False): __lowerCamelCase : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase ( self : Optional[int]): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self : List[Any]): return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') __lowerCamelCase : int = self.default_image_processor __lowerCamelCase : Tuple = prepare_img() 
__lowerCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf') # forward pass __lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__) # verify the logits __lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
652
0
'''simple docstring''' def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ): '''simple docstring''' _a = 0 _a = len(UpperCamelCase ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None _a = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase ): return None _a = sorted_collection[point] if current_item == item: return point else: if point < left: _a = left _a = point elif point > right: _a = right _a = point else: if item < current_item: _a = point - 1 else: _a = point + 1 return None def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] ): '''simple docstring''' if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None _a = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) elif point > right: return interpolation_search_by_recursion(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( UpperCamelCase , UpperCamelCase , UpperCamelCase , point - 1 ) else: return interpolation_search_by_recursion( UpperCamelCase , UpperCamelCase , point + 1 , UpperCamelCase ) def snake_case_ (UpperCamelCase : List[str] ): '''simple docstring''' if collection != sorted(UpperCamelCase ): raise ValueError('''Collection must be ascending sorted''' ) return 
True if __name__ == "__main__": import sys _snake_case : int = 0 if debug == 1: _snake_case : Optional[int] = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') _snake_case : Tuple = 67 _snake_case : Tuple = interpolation_search(collection, target) if result is not None: print(F'''{target} found at positions: {result}''') else: print('Not found')
22
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] _UpperCAmelCase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
652
0
import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class _a ( unittest.TestCase ): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ) -> int: UpperCamelCase_ = parent UpperCamelCase_ = batch_size UpperCamelCase_ = seq_length UpperCamelCase_ = is_training UpperCamelCase_ = use_attention_mask UpperCamelCase_ = use_token_type_ids UpperCamelCase_ = use_labels UpperCamelCase_ = vocab_size UpperCamelCase_ = hidden_size UpperCamelCase_ = num_hidden_layers UpperCamelCase_ = num_attention_heads UpperCamelCase_ = intermediate_size UpperCamelCase_ = hidden_act UpperCamelCase_ = hidden_dropout_prob UpperCamelCase_ = attention_probs_dropout_prob UpperCamelCase_ = max_position_embeddings UpperCamelCase_ = type_vocab_size UpperCamelCase_ = type_sequence_label_size UpperCamelCase_ = initializer_range UpperCamelCase_ = num_choices def _UpperCAmelCase ( self ) -> str: UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_ = None if self.use_attention_mask: UpperCamelCase_ = random_attention_mask([self.batch_size, 
self.seq_length] ) UpperCamelCase_ = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , ) return config, input_ids, attention_mask def _UpperCAmelCase ( self ) -> Optional[int]: UpperCamelCase_ = self.prepare_config_and_inputs() UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs UpperCamelCase_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class _a ( UpperCAmelCase__ , unittest.TestCase ): """simple docstring""" A_ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> List[Any]: UpperCamelCase_ = FlaxDistilBertModelTester(self ) @slow def _UpperCAmelCase ( self ) -> List[str]: for model_class_name in self.all_model_classes: UpperCamelCase_ = model_class_name.from_pretrained('distilbert-base-uncased' ) UpperCamelCase_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class _a ( unittest.TestCase ): """simple docstring""" @slow def _UpperCAmelCase ( self ) -> Optional[Any]: UpperCamelCase_ = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' ) UpperCamelCase_ = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) UpperCamelCase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) UpperCamelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] UpperCamelCase_ = (1, 11, 768) self.assertEqual(output.shape , 
_UpperCAmelCase ) UpperCamelCase_ = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
23
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
652
0
'''simple docstring''' def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float )-> float: '''simple docstring''' if mass < 0: raise ValueError('''The mass of a body cannot be negative''' ) return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
24
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ={"""vocab_file""": """vocab.txt"""} a ={ """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } a ={ """openbmb/cpm-ant-10b""": 1024, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: __lowerCamelCase : int = collections.OrderedDict() with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader: __lowerCamelCase : Optional[int] = reader.readlines() for index, token in enumerate(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = token.rstrip('\n' ) __lowerCamelCase : Union[str, Any] = index return vocab class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="<unk>" ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_0): __lowerCamelCase : str = vocab __lowerCamelCase : Dict = unk_token __lowerCamelCase : int = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : int = list(SCREAMING_SNAKE_CASE__) if len(SCREAMING_SNAKE_CASE__) > self.max_input_chars_per_word: return [self.unk_token] __lowerCamelCase : Tuple = 0 __lowerCamelCase : str = [] while start < len(SCREAMING_SNAKE_CASE__): __lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = None while start < end: __lowerCamelCase : Any = ''.join(chars[start:end]) if substr in self.vocab: __lowerCamelCase : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = end return sub_tokens class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = 
VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = False def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,): requires_backends(self ,['jieba']) super().__init__( bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Optional[Any] = bod_token __lowerCamelCase : Dict = eod_token __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.encoder[space_token] __lowerCamelCase : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1])) __lowerCamelCase : int = {v: k for k, v in self.encoder.items()} __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token) @property def lowerCAmelCase ( self : List[Any]): return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : Tuple): return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : Union[str, Any]): return self.encoder["\n"] @property def lowerCAmelCase ( self : 
str): return len(self.encoder) def lowerCAmelCase ( self : str): return dict(self.encoder ,**self.added_tokens_encoder) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Any = [] for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)) return output_tokens def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0] __lowerCamelCase : str = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]): return token in self.encoder def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]): return "".join(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token)) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if os.path.isdir(SCREAMING_SNAKE_CASE__): __lowerCamelCase : Any = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory __lowerCamelCase : Any = 0 if " " in self.encoder: __lowerCamelCase : Any = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: __lowerCamelCase : str = self.encoder['\n'] del self.encoder["\n"] __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() 
,key=lambda SCREAMING_SNAKE_CASE__: x[1])) with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!') __lowerCamelCase : Any = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
652
0
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(a ) ) def __UpperCamelCase ( self : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] self.assertTrue(is_safetensors_compatible(a ) ) def __UpperCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", "unet/diffusion_pytorch_model.bin", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(a ) ) def __UpperCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] self.assertTrue(is_safetensors_compatible(a ) ) def __UpperCamelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [ "safety_checker/pytorch_model.bin", "safety_checker/model.safetensors", "vae/diffusion_pytorch_model.bin", "vae/diffusion_pytorch_model.safetensors", "text_encoder/pytorch_model.bin", # Removed: 'text_encoder/model.safetensors', "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] 
self.assertFalse(is_safetensors_compatible(a ) ) def __UpperCamelCase ( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : str = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : str = "fp16" self.assertTrue(is_safetensors_compatible(a , variant=a ) ) def __UpperCamelCase ( self : Tuple ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE : str = [ "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : int = "fp16" self.assertTrue(is_safetensors_compatible(a , variant=a ) ) def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = [ "unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors", ] SCREAMING_SNAKE_CASE : Tuple = "fp16" self.assertTrue(is_safetensors_compatible(a , variant=a ) ) def __UpperCamelCase ( self : str ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : int = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", "unet/diffusion_pytorch_model.fp16.bin", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE : List[Any] = "fp16" self.assertFalse(is_safetensors_compatible(a , variant=a ) ) def __UpperCamelCase ( self : List[str] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [ "text_encoder/pytorch_model.fp16.bin", "text_encoder/model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : Optional[int] = "fp16" 
self.assertTrue(is_safetensors_compatible(a , variant=a ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = [ "text_encoder/pytorch_model.bin", "text_encoder/model.safetensors", ] SCREAMING_SNAKE_CASE : int = "fp16" self.assertTrue(is_safetensors_compatible(a , variant=a ) ) def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [ "safety_checker/pytorch_model.fp16.bin", "safety_checker/model.fp16.safetensors", "vae/diffusion_pytorch_model.fp16.bin", "vae/diffusion_pytorch_model.fp16.safetensors", "text_encoder/pytorch_model.fp16.bin", # 'text_encoder/model.fp16.safetensors', "unet/diffusion_pytorch_model.fp16.bin", "unet/diffusion_pytorch_model.fp16.safetensors", ] SCREAMING_SNAKE_CASE : List[str] = "fp16" self.assertFalse(is_safetensors_compatible(a , variant=a ) )
25
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a ={"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
652
0
'''simple docstring''' from __future__ import annotations def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: # noqa: E741 """simple docstring""" while r - l > 1: __snake_case : str = (l + r) // 2 if v[m] >= key: __snake_case : Any = m else: __snake_case : Union[str, Any] = m # noqa: E741 return r def _a ( _lowerCamelCase ) -> int: """simple docstring""" if len(_lowerCamelCase ) == 0: return 0 __snake_case : Optional[Any] = [0] * len(_lowerCamelCase ) __snake_case : Dict = 1 __snake_case : Any = v[0] for i in range(1 , len(_lowerCamelCase ) ): if v[i] < tail[0]: __snake_case : List[str] = v[i] elif v[i] > tail[length - 1]: __snake_case : Any = v[i] length += 1 else: __snake_case : Optional[Any] = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
26
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : int = (UnCLIPScheduler,) def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Any = { 'num_train_timesteps': 1_0_0_0, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**SCREAMING_SNAKE_CASE__) return config def lowerCAmelCase ( self : Optional[Any]): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[Any]): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Union[str, Any]): for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple): for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any]): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase : Optional[int] = self.scheduler_classes[0] __lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log') __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__) assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5 def lowerCAmelCase ( self : Any): __lowerCamelCase : Dict = self.scheduler_classes[0] __lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range') __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = 0.5 assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5 assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5 assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5 def lowerCAmelCase ( self : List[str]): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : str = self.get_scheduler_config() __lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = scheduler.timesteps __lowerCamelCase : Union[str, Any] = self.dummy_model() __lowerCamelCase : Optional[Any] = self.dummy_sample_deter __lowerCamelCase : List[str] = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) # 2. 
predict previous mean of sample x_t-1 __lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Optional[Any] = pred_prev_sample __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 252.2682495) < 1E-2 assert abs(result_mean.item() - 0.3284743) < 1E-3 def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : List[Any] = self.get_scheduler_config() __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) scheduler.set_timesteps(2_5) __lowerCamelCase : int = scheduler.timesteps __lowerCamelCase : Tuple = self.dummy_model() __lowerCamelCase : Any = self.dummy_sample_deter __lowerCamelCase : Any = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if i + 1 == timesteps.shape[0]: __lowerCamelCase : Optional[Any] = None else: __lowerCamelCase : Union[str, Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowerCamelCase : int = scheduler.step( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Union[str, Any] = pred_prev_sample __lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 258.2044983) < 1E-2 assert abs(result_mean.item() - 0.3362038) < 1E-3 def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Union[str, Any]): pass
652
0
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowerCamelCase( unittest.TestCase ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=18 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , ): _A = size if size is not None else {'height': 18, 'width': 18} _A = parent _A = batch_size _A = num_channels _A = image_size _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std def lowerCAmelCase__ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCamelCase( __snake_case , unittest.TestCase ): '''simple docstring''' __magic_name__ = DPTImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self ): _A = DPTImageProcessingTester(self ) @property def lowerCAmelCase__ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self ): _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case_ , 'image_mean' ) ) self.assertTrue(hasattr(snake_case_ , 'image_std' ) ) self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) ) self.assertTrue(hasattr(snake_case_ , 'do_resize' ) ) self.assertTrue(hasattr(snake_case_ , 'size' ) ) def lowerCAmelCase__ ( self ): _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , 
{'height': 18, 'width': 18} ) _A = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def lowerCAmelCase__ ( self ): # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _A = image_processing(snake_case_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCAmelCase__ ( self ): # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _A = image_processing(snake_case_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], 
) , ) def lowerCAmelCase__ ( self ): # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ ) for image in image_inputs: self.assertIsInstance(snake_case_ , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _A = image_processing(snake_case_ , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
27
from ...configuration_utils import PretrainedConfig from ...utils import logging a =logging.get_logger(__name__) a ={ """caidas/swin2sr-classicalsr-x2-64""": ( """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json""" ), } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Optional[int] = '''swin2sr''' _UpperCAmelCase : Any = { '''hidden_size''': '''embed_dim''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=1_8_0 ,SCREAMING_SNAKE_CASE__ : Any=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : int=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : int="1conv" ,SCREAMING_SNAKE_CASE__ : Optional[int]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = image_size __lowerCamelCase : str = patch_size __lowerCamelCase : List[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : Dict = depths __lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = num_heads __lowerCamelCase : Tuple = window_size __lowerCamelCase : Dict = mlp_ratio __lowerCamelCase : str = qkv_bias __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate 
__lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : Dict = use_absolute_embeddings __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : List[Any] = upscale __lowerCamelCase : List[Any] = img_range __lowerCamelCase : List[str] = resi_connection __lowerCamelCase : Union[str, Any] = upsampler
652
0
'''simple docstring''' import itertools import math def lowercase__( __UpperCamelCase: int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__( ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = 2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def lowercase__( __UpperCamelCase: int = 1_00_01 ): """simple docstring""" return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) ) if __name__ == "__main__": print(F"""{solution() = }""")
28
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__="pt" ) -> Dict: __lowerCamelCase : Any = {'add_prefix_space': True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(' ' ) else {} __lowerCamelCase : int = padding_side return tokenizer( [line] , max_length=lowerCamelCase__ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ) -> List[str]: __lowerCamelCase : List[str] = input_ids.ne(lowerCamelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,): super().__init__() __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source') __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target') __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file) __lowerCamelCase : List[Any] = 
max_source_length __lowerCamelCase : List[str] = max_target_length assert min(self.src_lens) > 0, F"found empty line in {self.src_file}" __lowerCamelCase : Any = tokenizer __lowerCamelCase : Optional[int] = prefix if n_obs is not None: __lowerCamelCase : Dict = self.src_lens[:n_obs] __lowerCamelCase : str = src_lang __lowerCamelCase : Any = tgt_lang def __len__( self : Tuple): return len(self.src_lens) def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Dict = index + 1 # linecache starts at 1 __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n') __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n') assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __lowerCamelCase : Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer ) __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right') __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_target_length ,'right') __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze() __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze() __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int): return [len(SCREAMING_SNAKE_CASE__) for x in 
Path(SCREAMING_SNAKE_CASE__).open().readlines()] def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch]) __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch]) __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch]) __lowerCamelCase : Optional[int] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id ) __lowerCamelCase : int = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id ) __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = { 'input_ids': source_ids, 'attention_mask': source_mask, 'decoder_input_ids': y, } return batch a =getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return list(itertools.chain.from_iterable(lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None: __lowerCamelCase : str = get_git_info() save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , 'git_log.json' ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ) -> List[str]: with open(lowerCamelCase__ , 'w' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: with open(lowerCamelCase__ ) as f: return json.load(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: __lowerCamelCase : str = git.Repo(search_parent_directories=lowerCamelCase__ ) __lowerCamelCase : Any = { 'repo_id': str(lowerCamelCase__ ), 'repo_sha': str(repo.head.object.hexsha ), 
'repo_branch': str(repo.active_branch ), 'hostname': str(socket.gethostname() ), } return repo_infos def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List: return list(map(lowerCamelCase__ , lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: with open(lowerCamelCase__ , 'wb' ) as f: return pickle.dump(lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: def remove_articles(lowerCamelCase__ ): return re.sub(R'\b(a|an|the)\b' , ' ' , lowerCamelCase__ ) def white_space_fix(lowerCamelCase__ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase__ ): __lowerCamelCase : Dict = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int: __lowerCamelCase : str = normalize_answer(lowerCamelCase__ ).split() __lowerCamelCase : Optional[int] = normalize_answer(lowerCamelCase__ ).split() __lowerCamelCase : Union[str, Any] = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ ) __lowerCamelCase : Any = sum(common.values() ) if num_same == 0: return 0 __lowerCamelCase : List[Any] = 1.0 * num_same / len(lowerCamelCase__ ) __lowerCamelCase : int = 1.0 * num_same / len(lowerCamelCase__ ) __lowerCamelCase : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) __lowerCamelCase : Dict = 0 for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ): em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ ) if 
len(lowerCamelCase__ ) > 0: em /= len(lowerCamelCase__ ) return {"em": em} def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: return model_prefix.startswith('rag' ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: __lowerCamelCase : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __lowerCamelCase : List[str] = 'dropout_rate' for p in extra_params: if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) continue __lowerCamelCase : List[Any] = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p] setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) return hparams, config
652
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=lowerCAmelCase ): a__: Optional[Any] = ['keras_nlp'] def __init__( self , *UpperCAmelCase , **UpperCAmelCase ): requires_backends(self , ['''keras_nlp'''] )
29
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig a =logging.get_logger(__name__) # General docstring a ="""MobileNetV1Config""" # Base docstring a ="""google/mobilenet_v1_1.0_224""" a =[1, 1024, 7, 7] # Image classification docstring a ="""google/mobilenet_v1_1.0_224""" a ="""tabby, tabby cat""" a =[ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str: __lowerCamelCase : str = {} if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : int = model.mobilenet_va else: __lowerCamelCase : List[str] = model __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/' __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var for i in range(1_3 ): __lowerCamelCase : Any = i + 1 __lowerCamelCase : Union[str, Any] = i * 2 __lowerCamelCase : Optional[Any] = backbone.layer[pt_index] __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" __lowerCamelCase : Tuple = pointer.convolution.weight __lowerCamelCase : Optional[Any] = pointer.normalization.bias __lowerCamelCase : Union[str, Any] = 
pointer.normalization.weight __lowerCamelCase : List[str] = pointer.normalization.running_mean __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var __lowerCamelCase : int = backbone.layer[pt_index + 1] __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" __lowerCamelCase : Optional[Any] = pointer.convolution.weight __lowerCamelCase : Any = pointer.normalization.bias __lowerCamelCase : str = pointer.normalization.weight __lowerCamelCase : Dict = pointer.normalization.running_mean __lowerCamelCase : List[str] = pointer.normalization.running_var if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __lowerCamelCase : Any = model.classifier.weight __lowerCamelCase : int = model.classifier.bias return tf_to_pt_map def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ ) __lowerCamelCase : List[str] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : List[Any] = array # Build TF to PyTorch weights loading map __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue __lowerCamelCase : Optional[int] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __lowerCamelCase : Any = array.squeeze().transpose() else: __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ ) tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ ) tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ ) tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ ) tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> torch.Tensor: __lowerCamelCase , __lowerCamelCase : int = features.shape[-2:] __lowerCamelCase , __lowerCamelCase : List[str] = conv_layer.stride __lowerCamelCase , __lowerCamelCase : str = 
conv_layer.kernel_size if in_height % stride_height == 0: __lowerCamelCase : Optional[int] = max(kernel_height - stride_height , 0 ) else: __lowerCamelCase : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowerCamelCase : List[str] = max(kernel_width - stride_width , 0 ) else: __lowerCamelCase : List[str] = max(kernel_width - (in_width % stride_width) , 0 ) __lowerCamelCase : List[str] = pad_along_width // 2 __lowerCamelCase : Optional[int] = pad_along_width - pad_left __lowerCamelCase : Any = pad_along_height // 2 __lowerCamelCase : List[Any] = pad_along_height - pad_top __lowerCamelCase : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowerCamelCase__ , lowerCamelCase__ , 'constant' , 0.0 ) class A_ ( nn.Module ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[bool] = True ,SCREAMING_SNAKE_CASE__ : Optional[bool or str] = True ,): super().__init__() __lowerCamelCase : Dict = config if in_channels % groups != 0: raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.") __lowerCamelCase : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2) __lowerCamelCase : Optional[int] = nn.Convad( in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ ,padding_mode='zeros' ,) if use_normalization: __lowerCamelCase : Optional[int] = nn.BatchNormad( 
num_features=SCREAMING_SNAKE_CASE__ ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=SCREAMING_SNAKE_CASE__ ,track_running_stats=SCREAMING_SNAKE_CASE__ ,) else: __lowerCamelCase : Dict = None if use_activation: if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : Dict = ACTaFN[use_activation] elif isinstance(config.hidden_act ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : str = ACTaFN[config.hidden_act] else: __lowerCamelCase : List[str] = config.hidden_act else: __lowerCamelCase : List[str] = None def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.Tensor): if self.config.tf_padding: __lowerCamelCase : Any = apply_tf_padding(SCREAMING_SNAKE_CASE__ ,self.convolution) __lowerCamelCase : Optional[int] = self.convolution(SCREAMING_SNAKE_CASE__) if self.normalization is not None: __lowerCamelCase : Dict = self.normalization(SCREAMING_SNAKE_CASE__) if self.activation is not None: __lowerCamelCase : List[str] = self.activation(SCREAMING_SNAKE_CASE__) return features class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[str, Any] = MobileNetVaConfig _UpperCAmelCase : List[str] = load_tf_weights_in_mobilenet_va _UpperCAmelCase : List[str] = '''mobilenet_v1''' _UpperCAmelCase : Any = '''pixel_values''' _UpperCAmelCase : int = False def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[nn.Linear, nn.Convad]): if isinstance(SCREAMING_SNAKE_CASE__ ,(nn.Linear, nn.Convad)): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(SCREAMING_SNAKE_CASE__ ,nn.BatchNormad): module.bias.data.zero_() module.weight.data.fill_(1.0) a =r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. 
Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ a =r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True): super().__init__(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = config __lowerCamelCase : Optional[int] = 3_2 __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth) __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,) __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowerCamelCase : str = nn.ModuleList() for i in range(1_3): __lowerCamelCase : str = out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth) self.layer.append( MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,)) 
self.layer.append( MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,)) __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict): raise NotImplementedError @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,): __lowerCamelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__) if output_hidden_states: __lowerCamelCase : Any = all_hidden_states + (hidden_states,) __lowerCamelCase : Optional[Any] = hidden_states if self.pooler is not None: __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1) else: __lowerCamelCase : List[str] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ 
,hidden_states=SCREAMING_SNAKE_CASE__ ,) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig): super().__init__(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = config.num_labels __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,): __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1] __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : List[str] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCamelCase : Dict = 'regression' elif self.num_labels > 1 and 
(labels.dtype == torch.long or labels.dtype == torch.int): __lowerCamelCase : int = 'single_label_classification' else: __lowerCamelCase : Tuple = 'multi_label_classification' if self.config.problem_type == "regression": __lowerCamelCase : Tuple = MSELoss() if self.num_labels == 1: __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze()) else: __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) elif self.config.problem_type == "single_label_classification": __lowerCamelCase : List[str] = CrossEntropyLoss() __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1)) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase : int = BCEWithLogitsLoss() __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if not return_dict: __lowerCamelCase : List[str] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
652
0
def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : Dict = [0 for i in range(r + 1 )] # nc0 = 1 UpperCAmelCase_ : int = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. UpperCAmelCase_ : Union[str, Any] = min(_lowercase , _lowercase ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
30
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = cva.getAffineTransform(lowerCamelCase__ , lowerCamelCase__ ) return cva.warpAffine(lowerCamelCase__ , lowerCamelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image a =cva.imread( str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""") ) # turn image in gray scale value a =cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape a , a =gray_img.shape # set different points to rotate image a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa) a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa) a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa) a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list a =[ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations a =plt.figure(1) a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""") plt.title(titles[i]) plt.axis("""off""") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
652
0
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring lowerCamelCase__ : Dict = 'RegNetConfig' # Base docstring lowerCamelCase__ : Union[str, Any] = 'facebook/regnet-y-040' lowerCamelCase__ : Dict = [1, 1_088, 7, 7] # Image classification docstring lowerCamelCase__ : List[Any] = 'facebook/regnet-y-040' lowerCamelCase__ : Any = 'tabby, tabby cat' lowerCamelCase__ : List[Any] = [ 'facebook/regnet-y-040', # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[str] = "relu" , **_lowerCAmelCase : Tuple , ): super().__init__(**_lowerCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb SCREAMING_SNAKE_CASE_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD( filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding='VALID' , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='convolution' , ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) 
SCREAMING_SNAKE_CASE_ = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.convolution(self.padding(_lowerCAmelCase ) ) SCREAMING_SNAKE_CASE_ = self.normalization(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Optional[int] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : List[Any] ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = config.num_channels SCREAMING_SNAKE_CASE_ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] ): SCREAMING_SNAKE_CASE_ = shape_list(_lowerCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) ) SCREAMING_SNAKE_CASE_ = self.embedder(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Dict ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD( filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='convolution' ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False ): return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , **_lowerCAmelCase : Any ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='pooler' ) SCREAMING_SNAKE_CASE_ = [ tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase ) for layer_module in self.attention: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , 
_lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : str ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1 SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width ) SCREAMING_SNAKE_CASE_ = ( TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. SCREAMING_SNAKE_CASE_ = [ TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( _lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='layer.2' ), ] SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = hidden_state for layer_module in self.layers: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.shortcut(_lowerCAmelCase ) hidden_state += residual SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Dict ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1 SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width ) SCREAMING_SNAKE_CASE_ = ( TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) SCREAMING_SNAKE_CASE_ = [ TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , 
activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( _lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(_lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='layer.3' ), ] SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Dict ): SCREAMING_SNAKE_CASE_ = hidden_state for layer_module in self.layers: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.shortcut(_lowerCAmelCase ) hidden_state += residual SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Optional[Any] ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer SCREAMING_SNAKE_CASE_ = [ # downsampling is done in the first layer with stride of 2 layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , name='layers.0' ), *[layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , name=F"layers.{i+1}" ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Optional[int] ): for layer_module in self.layers: SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : str ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample 
the input self.stages.append( TFRegNetStage( _lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) SCREAMING_SNAKE_CASE_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase , name=F"stages.{i+1}" ) ) def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ): SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,) SCREAMING_SNAKE_CASE_ = stage_module(_lowerCAmelCase ) if output_hidden_states: SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): '''simple docstring''' lowercase_ = RegNetConfig def __init__( self : int , _lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = config SCREAMING_SNAKE_CASE_ = TFRegNetEmbeddings(_lowerCAmelCase , name='embedder' ) SCREAMING_SNAKE_CASE_ = TFRegNetEncoder(_lowerCAmelCase , name='encoder' ) SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , ): SCREAMING_SNAKE_CASE_ = ( output_hidden_states if output_hidden_states is 
not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE_ = self.embedder(_lowerCAmelCase , training=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.encoder( _lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = encoder_outputs[0] SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase ) # Change to NCHW output format have uniformity in the modules SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: SCREAMING_SNAKE_CASE_ = tuple([tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = RegNetConfig lowercase_ = "regnet" lowercase_ = "pixel_values" @property def lowerCAmelCase_ ( self : List[str] ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} lowerCamelCase__ : int = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. 
Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCamelCase__ : Any = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." 
, _SCREAMING_SNAKE_CASE , ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Dict , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Tuple ): super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(_lowerCAmelCase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(_lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : str=False , ): SCREAMING_SNAKE_CASE_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE_ = self.regnet( pixel_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , _SCREAMING_SNAKE_CASE , ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Tuple , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[int] ): super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = config.num_labels SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(_lowerCAmelCase , name='regnet' ) # classification head SCREAMING_SNAKE_CASE_ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(_lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict=False , ): SCREAMING_SNAKE_CASE_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE_ = self.regnet( _lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = outputs.pooler_output if return_dict else outputs[1] SCREAMING_SNAKE_CASE_ = self.classifier[0](_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = self.classifier[1](_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = None if labels is None else self.hf_compute_loss(labels=_lowerCAmelCase , logits=_lowerCAmelCase ) if not return_dict: SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=_lowerCAmelCase , 
logits=_lowerCAmelCase , hidden_states=outputs.hidden_states )
31
import math def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : List[str] = F"Input value of [number={number}] must be an integer" raise TypeError(lowerCamelCase__ ) if number < 1: __lowerCamelCase : int = F"Input value of [number={number}] must be > 0" raise ValueError(lowerCamelCase__ ) elif number == 1: return 3 elif number == 2: return 5 else: __lowerCamelCase : Any = int(math.log(number // 3 , 2 ) ) + 2 __lowerCamelCase : List[Any] = [3, 5] __lowerCamelCase : Union[str, Any] = 2 __lowerCamelCase : List[str] = 3 for block in range(1 , lowerCamelCase__ ): for _ in range(lowerCamelCase__ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): a =0 try: a =proth(number) except ValueError: print(F"""ValueError: there is no {number}th Proth number""") continue print(F"""The {number}th Proth number: {value}""")
652
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = "▁" UpperCAmelCase_ = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} UpperCAmelCase_ = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } UpperCAmelCase_ = {"vinai/bartpho-syllable": 10_24} class __UpperCamelCase ( A__ ): __A : List[Any] = VOCAB_FILES_NAMES __A : Dict = PRETRAINED_VOCAB_FILES_MAP __A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : int = ["""input_ids""", """attention_mask"""] def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="<s>" , _UpperCamelCase="</s>" , _UpperCamelCase="</s>" , _UpperCamelCase="<s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase="<mask>" , _UpperCamelCase = None , **_UpperCamelCase , ): # Mask token behave like a normal word, i.e. 
include the space before it _UpperCAmelCase = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) _UpperCAmelCase = vocab_file _UpperCAmelCase = monolingual_vocab_file _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _UpperCAmelCase = {} _UpperCAmelCase = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCamelCase ) not in self.fairseq_tokens_to_ids: _UpperCAmelCase = cnt cnt += 1 with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): _UpperCAmelCase = line.strip().split()[0] _UpperCAmelCase = len(self.fairseq_tokens_to_ids ) if str(_UpperCamelCase ) not in self.fairseq_tokens_to_ids: _UpperCAmelCase = len(self.fairseq_tokens_to_ids ) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None _UpperCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , _UpperCamelCase ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] 
_UpperCAmelCase = [self.cls_token_id] _UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1] def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCamelCase( self ): return len(self.fairseq_ids_to_tokens ) def UpperCamelCase( self ): _UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCamelCase( self , _UpperCamelCase ): return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def UpperCamelCase( self , _UpperCamelCase ): return self.fairseq_ids_to_tokens[index] def UpperCamelCase( self , _UpperCamelCase ): _UpperCAmelCase = ''''''.join(_UpperCamelCase ).replace(_UpperCamelCase , ''' ''' ).strip() return out_string def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ): if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = 
os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( _UpperCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(_UpperCamelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
32
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A_ ( unittest.TestCase ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __lowerCamelCase : str = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = min_resolution __lowerCamelCase : Tuple = max_resolution __lowerCamelCase : Dict = do_resize __lowerCamelCase : List[Any] = size __lowerCamelCase : Tuple = do_normalize __lowerCamelCase : Any = image_mean __lowerCamelCase : List[str] = image_std __lowerCamelCase : List[Any] = do_rescale __lowerCamelCase : str = rescale_factor __lowerCamelCase : Tuple = do_pad def lowerCAmelCase ( self : Dict): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, 
"do_pad": self.do_pad, } def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False): if not batched: __lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image): __lowerCamelCase , __lowerCamelCase : Any = image.size else: __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w) __lowerCamelCase : Tuple = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Union[str, Any] = self.size['shortest_edge'] __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h) else: __lowerCamelCase : List[Any] = self.size['shortest_edge'] __lowerCamelCase : Optional[int] = self.size['shortest_edge'] else: __lowerCamelCase : List[str] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0] __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = DetaImageProcessingTester(self) @property def lowerCAmelCase ( self : Any): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Dict): __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ 
,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester 
,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : int): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ 
,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def lowerCAmelCase ( self : Optional[Any]): # prepare image and target __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f: __lowerCamelCase : List[str] = json.loads(f.read()) __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __lowerCamelCase : Optional[int] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] 
,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify orig_size __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__)) @slow def lowerCAmelCase ( self : str): # prepare image, target and masks_path __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f: __lowerCamelCase : Tuple = json.loads(f.read()) __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic') __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : Tuple = torch.Size([6, 4]) 
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : int = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify masks __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__) # verify orig_size __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
652
0
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class __magic_name__ (snake_case_ ): '''simple docstring''' __lowercase : Union[str, Any] = (EulerDiscreteScheduler,) __lowercase : str = 10 def SCREAMING_SNAKE_CASE__ ( self:Tuple , **_a:str ): snake_case__ = { '''num_train_timesteps''': 11_00, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**_a ) return config def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=_a ) def SCREAMING_SNAKE_CASE__ ( self:Any ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def SCREAMING_SNAKE_CASE__ ( self:Any ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_a ) def SCREAMING_SNAKE_CASE__ ( self:str ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def SCREAMING_SNAKE_CASE__ ( self:str ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): snake_case__ = scheduler.scale_model_input(_a , _a ) snake_case__ = model(_a , _a ) snake_case__ = scheduler.step(_a , _a , _a , generator=_a ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): snake_case__ = self.scheduler_classes[0] snake_case__ = 
self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case__ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps ) snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case__ = sample.to(_a ) for i, t in enumerate(scheduler.timesteps ): snake_case__ = scheduler.scale_model_input(_a , _a ) snake_case__ = model(_a , _a ) snake_case__ = scheduler.step(_a , _a , _a , generator=_a ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:List[str] ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ = sample.to(_a ) for t in scheduler.timesteps: snake_case__ = scheduler.scale_model_input(_a , _a ) snake_case__ = model(_a , _a ) snake_case__ = scheduler.step(_a , _a , _a , generator=_a ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): snake_case__ = self.scheduler_classes[0] snake_case__ = self.get_scheduler_config() snake_case__ = scheduler_class(**_a , use_karras_sigmas=_a ) scheduler.set_timesteps(self.num_inference_steps , device=_a ) snake_case__ = torch.manual_seed(0 ) snake_case__ = self.dummy_model() snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() snake_case__ = sample.to(_a ) for t in scheduler.timesteps: 
snake_case__ = scheduler.scale_model_input(_a , _a ) snake_case__ = model(_a , _a ) snake_case__ = scheduler.step(_a , _a , _a , generator=_a ) snake_case__ = output.prev_sample snake_case__ = torch.sum(torch.abs(_a ) ) snake_case__ = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2 assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
33
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : List[Any] = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCAmelCase : Tuple = False @property def lowerCAmelCase ( self : Tuple): return 3_2 @property def lowerCAmelCase ( self : List[Any]): return 3_2 @property def lowerCAmelCase ( self : str): return self.time_input_dim @property def lowerCAmelCase ( self : List[str]): return self.time_input_dim * 4 @property def lowerCAmelCase ( self : List[str]): return 1_0_0 @property def lowerCAmelCase ( self : Dict): torch.manual_seed(0) __lowerCamelCase : Optional[Any] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': 
self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__) return model @property def lowerCAmelCase ( self : Union[str, Any]): return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[Any]): torch.manual_seed(0) __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs) return model def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Tuple = self.dummy_unet __lowerCamelCase : List[Any] = self.dummy_movq __lowerCamelCase : str = DDIMScheduler( num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Dict = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0): __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to( SCREAMING_SNAKE_CASE__) # create hint __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) 
,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) if str(SCREAMING_SNAKE_CASE__).startswith('mps'): __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 6_4, 'width': 6_4, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Dict = 'cpu' __lowerCamelCase : Tuple = self.get_dummy_components() __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = output.images __lowerCamelCase : Tuple = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0] __lowerCamelCase : Dict = image[0, -3:, -3:, -1] __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCamelCase : List[str] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : int): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : int): __lowerCamelCase : List[Any] = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy') __lowerCamelCase : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png') __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0 __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0) __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa) pipe_prior.to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa) __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = 'A robot, 4k photo' __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior( SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase : Any = pipeline( image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,) __lowerCamelCase : List[Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
652
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE_ = { 'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'], 'configuration_data2vec_text': [ 'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecTextConfig', 'Data2VecTextOnnxConfig', ], 'configuration_data2vec_vision': [ 'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecVisionConfig', 'Data2VecVisionOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ 'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecAudioForAudioFrameClassification', 'Data2VecAudioForCTC', 'Data2VecAudioForSequenceClassification', 'Data2VecAudioForXVector', 'Data2VecAudioModel', 'Data2VecAudioPreTrainedModel', ] SCREAMING_SNAKE_CASE_ = [ 'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecTextForCausalLM', 'Data2VecTextForMaskedLM', 'Data2VecTextForMultipleChoice', 'Data2VecTextForQuestionAnswering', 'Data2VecTextForSequenceClassification', 'Data2VecTextForTokenClassification', 'Data2VecTextModel', 'Data2VecTextPreTrainedModel', ] SCREAMING_SNAKE_CASE_ = [ 'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST', 'Data2VecVisionForImageClassification', 'Data2VecVisionForMaskedImageModeling', 'Data2VecVisionForSemanticSegmentation', 'Data2VecVisionModel', 'Data2VecVisionPreTrainedModel', ] if is_tf_available(): SCREAMING_SNAKE_CASE_ = [ 'TFData2VecVisionForImageClassification', 'TFData2VecVisionForSemanticSegmentation', 'TFData2VecVisionModel', 'TFData2VecVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from 
.configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
34
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A_ : _UpperCAmelCase : int = XGLMConfig _UpperCAmelCase : List[Any] = {} _UpperCAmelCase : Tuple = '''gelu''' def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,): __lowerCamelCase : List[str] = parent __lowerCamelCase : List[str] = batch_size __lowerCamelCase : str = seq_length __lowerCamelCase : Optional[Any] = is_training __lowerCamelCase : Any = use_input_mask __lowerCamelCase : str = use_labels __lowerCamelCase : Any = vocab_size __lowerCamelCase : Dict = d_model __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : List[str] = ffn_dim __lowerCamelCase : Optional[Any] = activation_function __lowerCamelCase : Tuple = activation_dropout __lowerCamelCase : Union[str, Any] = attention_dropout __lowerCamelCase : List[str] = max_position_embeddings 
__lowerCamelCase : List[Any] = initializer_range __lowerCamelCase : Any = None __lowerCamelCase : List[str] = 0 __lowerCamelCase : List[str] = 2 __lowerCamelCase : Dict = 1 def lowerCAmelCase ( self : Any): return XGLMConfig.from_pretrained('facebook/xglm-564M') def lowerCAmelCase ( self : str): __lowerCamelCase : Any = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3) __lowerCamelCase : Dict = None if self.use_input_mask: __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) __lowerCamelCase : int = self.get_config() __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2) return ( config, input_ids, input_mask, head_mask, ) def lowerCAmelCase ( self : List[Any]): return XGLMConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,) def lowerCAmelCase ( self : int): __lowerCamelCase : int = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Any = config_and_inputs __lowerCamelCase : str = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () _UpperCAmelCase : str = ( 
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) _UpperCAmelCase : Tuple = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Union[str, Any] = False def lowerCAmelCase ( self : Tuple): __lowerCamelCase : Tuple = TFXGLMModelTester(self) __lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7) def lowerCAmelCase ( self : List[Any]): self.config_tester.run_common_tests() @slow def lowerCAmelCase ( self : str): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.') def lowerCAmelCase ( self : Union[str, Any]): super().test_resize_token_embeddings() @require_tf class A_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True): __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__) @slow def lowerCAmelCase ( self : List[str]): __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M') __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') tf.random.set_seed(0) __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf') __lowerCamelCase : List[Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0'): __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0]) __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) @slow def lowerCAmelCase ( self : Dict): __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M') __lowerCamelCase : Union[str, Any] = 'left' # use different length sentences to test batching __lowerCamelCase : List[str] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. 
The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2) __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2) __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2) __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
652
0
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy a_ :Optional[Any] = logging.getLogger(__name__) def a ( A__ , A__ , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = False , ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb_quantization_config.load_in_abit SCREAMING_SNAKE_CASE__ : List[str] = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) SCREAMING_SNAKE_CASE__ : int = [] # custom device map if isinstance(A__ , A__ ) and len(device_map.keys() ) > 1: SCREAMING_SNAKE_CASE__ : List[Any] = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: SCREAMING_SNAKE_CASE__ : Optional[int] = get_keys_to_not_convert(A__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = 
bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: SCREAMING_SNAKE_CASE__ : List[Any] = [] SCREAMING_SNAKE_CASE__ : List[str] = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A__ ) # compatibility with peft SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_in_abit SCREAMING_SNAKE_CASE__ : int = load_in_abit SCREAMING_SNAKE_CASE__ : int = get_parameter_device(A__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) SCREAMING_SNAKE_CASE__ : List[str] = replace_with_bnb_layers(A__ , A__ , modules_to_not_convert=A__ ) # convert param to the right dtype SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: SCREAMING_SNAKE_CASE__ : int = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) SCREAMING_SNAKE_CASE__ : List[str] = getattr(A__ , A__ , A__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A__ ): param.to(A__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( f"""The model device type is {model_device.type}. 
However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): SCREAMING_SNAKE_CASE__ : List[Any] = replace_with_bnb_layers( A__ , A__ , modules_to_not_convert=A__ ) SCREAMING_SNAKE_CASE__ : List[Any] = get_quantized_model_device_map( A__ , A__ , A__ , max_memory=A__ , no_split_module_classes=A__ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): SCREAMING_SNAKE_CASE__ : Optional[int] = True SCREAMING_SNAKE_CASE__ : Any = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A__ , A__ , A__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=A__ , offload_state_dict=A__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A__ , device_map=A__ , offload_dir=A__ ) def a ( A__ , A__ , A__=None , A__=None , A__=None ) -> Union[str, Any]: '''simple docstring''' if device_map is None: if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : Dict = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A__ , A__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) SCREAMING_SNAKE_CASE__ : List[Any] = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) SCREAMING_SNAKE_CASE__ : List[str] = {} SCREAMING_SNAKE_CASE__ : Dict = special_dtypes SCREAMING_SNAKE_CASE__ : Union[str, Any] = no_split_module_classes SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": SCREAMING_SNAKE_CASE__ : int = get_balanced_memory( A__ , low_zero=(device_map == '''balanced_low_0''') , max_memory=A__ , **A__ , ) SCREAMING_SNAKE_CASE__ : str = max_memory SCREAMING_SNAKE_CASE__ : Dict = infer_auto_device_map(A__ , **A__ ) if isinstance(A__ , A__ ): # check if don't have any quantized module on the cpu SCREAMING_SNAKE_CASE__ : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules SCREAMING_SNAKE_CASE__ : Optional[Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. 
If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def a ( A__ , A__ , A__=None , A__=None ) -> str: '''simple docstring''' if modules_to_not_convert is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = _replace_with_bnb_layers( A__ , A__ , A__ , A__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def a ( A__ , A__ , A__=None , A__=None , ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = False for name, module in model.named_children(): if current_key_name is None: SCREAMING_SNAKE_CASE__ : List[str] = [] current_key_name.append(A__ ) if isinstance(A__ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` SCREAMING_SNAKE_CASE__ : int = '''.'''.join(A__ ) SCREAMING_SNAKE_CASE__ : Dict = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: SCREAMING_SNAKE_CASE__ : Any = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : Any = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A__ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: SCREAMING_SNAKE_CASE__ : List[str] = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) SCREAMING_SNAKE_CASE__ : Dict = module.weight.data if module.bias is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = module.bias.data bnb_module.requires_grad_(A__ ) setattr(A__ , A__ , A__ ) SCREAMING_SNAKE_CASE__ : List[Any] = True if len(list(module.children() ) ) > 0: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = _replace_with_bnb_layers( A__ , A__ , A__ , A__ ) SCREAMING_SNAKE_CASE__ : int = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def a ( A__ ) -> str: '''simple docstring''' with init_empty_weights(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = deepcopy(A__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` SCREAMING_SNAKE_CASE__ : Dict = find_tied_parameters(A__ ) # For compatibility with Accelerate < 0.18 if isinstance(A__ , A__ ): SCREAMING_SNAKE_CASE__ : str = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: SCREAMING_SNAKE_CASE__ : List[str] = sum(A__ , [] ) SCREAMING_SNAKE_CASE__ : str = len(A__ ) > 0 # Check if it is a base model 
SCREAMING_SNAKE_CASE__ : Dict = False if hasattr(A__ , '''base_model_prefix''' ): SCREAMING_SNAKE_CASE__ : Optional[int] = not hasattr(A__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head SCREAMING_SNAKE_CASE__ : List[str] = list(model.named_children() ) SCREAMING_SNAKE_CASE__ : Dict = [list_modules[-1][0]] # add last module together with tied weights SCREAMING_SNAKE_CASE__ : Any = set(A__ ) - set(A__ ) SCREAMING_SNAKE_CASE__ : List[Any] = list(set(A__ ) ) + list(A__ ) # remove ".weight" from the keys SCREAMING_SNAKE_CASE__ : int = ['''.weight''', '''.bias'''] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: SCREAMING_SNAKE_CASE__ : List[str] = name.replace(A__ , '''''' ) filtered_module_names.append(A__ ) return filtered_module_names def a ( A__ ) -> Optional[Any]: '''simple docstring''' for m in model.modules(): if isinstance(A__ , bnb.nn.Linearabit ): return True return False def a ( A__ ) -> Optional[Any]: '''simple docstring''' return next(parameter.parameters() ).device def a ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple: '''simple docstring''' if fpaa_statistics is None: set_module_tensor_to_device(A__ , A__ , 0 , dtype=A__ , value=A__ ) SCREAMING_SNAKE_CASE__ : str = param_name SCREAMING_SNAKE_CASE__ : Dict = model if "." 
in tensor_name: SCREAMING_SNAKE_CASE__ : Optional[Any] = tensor_name.split('''.''' ) for split in splits[:-1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(A__ , A__ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) SCREAMING_SNAKE_CASE__ : Tuple = new_module SCREAMING_SNAKE_CASE__ : Tuple = splits[-1] # offload weights SCREAMING_SNAKE_CASE__ : Union[str, Any] = False offload_weight(module._parameters[tensor_name] , A__ , A__ , index=A__ ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A__ , index=A__ , ) else: offload_weight(A__ , A__ , A__ , index=A__ ) offload_weight(A__ , param_name.replace('''weight''' , '''SCB''' ) , A__ , index=A__ ) set_module_tensor_to_device(A__ , A__ , '''meta''' , dtype=A__ , value=torch.empty(*param.size() ) )
35
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
652
0
# NOTE(review): flattened chunk — accelerate test fixtures (dummy regression data,
# two toy regression models, and an MRPC dataloader builder). The rewrite gave all
# three classes the same name `_A` (the later definitions shadow the earlier ones)
# and collapsed duplicate parameter names, while bodies still read the original
# names (length, rng, x, examples, ...). Code preserved as-is; comments only.
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType


# Synthetic y = a*x + b + noise regression dataset.
class _A :
    '''simple docstring'''

    # NOTE(review): the four parameters were all renamed to SCREAMING_SNAKE_CASE_
    # (invalid duplicates); originally presumably (a, b, length, seed).
    def __init__( self ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=3 ,SCREAMING_SNAKE_CASE_=64 ,SCREAMING_SNAKE_CASE_=None ):
        '''simple docstring'''
        snake_case : List[Any] = np.random.default_rng(SCREAMING_SNAKE_CASE_ )
        snake_case : Dict = length
        snake_case : Optional[Any] = rng.normal(size=(length,) ).astype(np.floataa )
        # noisy linear targets
        snake_case : Dict = a * self.x + b + rng.normal(scale=0.1 ,size=(length,) ).astype(np.floataa )

    def __len__( self ):
        '''simple docstring'''
        return self.length

    def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
        '''simple docstring'''
        return {"x": self.x[i], "y": self.y[i]}


# Toy model with 2-element parameters; forward uses only element 0 of each.
class _A ( torch.nn.Module ):
    '''simple docstring'''

    def __init__( self ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=False ):
        '''simple docstring'''
        super().__init__()
        snake_case : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        snake_case : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        snake_case : Dict = True

    def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=None ):
        '''simple docstring'''
        # Log dtypes exactly once (first batch) to debug mixed-precision setups.
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            snake_case : Tuple = False
        return x * self.a[0] + self.b[0]


# Toy scalar-parameter model: y = a*x + b (shadows the class above).
class _A ( torch.nn.Module ):
    '''simple docstring'''

    def __init__( self ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=0 ,SCREAMING_SNAKE_CASE_=False ):
        '''simple docstring'''
        super().__init__()
        snake_case : Tuple = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() )
        snake_case : Optional[Any] = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ).float() )
        snake_case : Dict = True

    def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=None ):
        '''simple docstring'''
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            snake_case : int = False
        return x * self.a + self.b


def lowercase ( __A : Dict , __A : int = 16 ) -> List[str]:
    '''simple docstring'''
    # Build train/eval DataLoaders over the local MRPC CSV samples, tokenized
    # with bert-base-cased. NOTE(review): both parameters were renamed to `__A`;
    # the body also reads `accelerator`, presumably the original first argument.
    from datasets import load_dataset
    from transformers import AutoTokenizer

    snake_case : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    snake_case : Tuple = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    snake_case : List[str] = load_dataset("""csv""" , data_files=__A )
    snake_case : str = datasets["""train"""].unique("""label""" )
    snake_case : Any = {v: i for i, v in enumerate(__A )}

    def tokenize_function(__A : List[Any] ):
        # max_length=None => use the model max length (it's actually the default)
        snake_case : Tuple = tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=__A , max_length=__A , padding="""max_length""" )
        if "label" in examples:
            snake_case : str = [label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    snake_case : Dict = datasets.map(
        __A , batched=__A , remove_columns=["""sentence1""", """sentence2""", """label"""] , )

    def collate_fn(__A : Dict ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(__A , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(__A , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    snake_case : str = DataLoader(tokenized_datasets["""train"""] , shuffle=__A , collate_fn=__A , batch_size=2 )
    snake_case : Union[str, Any] = DataLoader(tokenized_datasets["""validation"""] , shuffle=__A , collate_fn=__A , batch_size=1 )
    return train_dataloader, eval_dataloader
36
# NOTE(review): flattened chunk — TensorFlow RegNet modeling code. An automated
# rewrite renamed every class to `A_` (the later definitions shadow the earlier
# ones), every docstring constant to `a`, methods to `lowerCAmelCase`, and local
# assignment targets to `__lowerCamelCase`, while bodies still reference the
# original identifiers (TFRegNetConvLayer, hidden_state, _CONFIG_FOR_DOC, ...).
# Code is preserved token-for-token; comments only. Role notes below are inferred
# from those internal references and should be confirmed upstream.
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig

a =logging.get_logger(__name__)

# General docstring
a ="""RegNetConfig"""

# Base docstring
a ="""facebook/regnet-y-040"""
a =[1, 1088, 7, 7]

# Image classification docstring
a ="""facebook/regnet-y-040"""
a ="""tabby, tabby cat"""

a =[
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


# Conv + BatchNorm + activation building block (referenced below as TFRegNetConvLayer).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : Optional[str] = "relu" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        __lowerCamelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
        __lowerCamelCase : Union[str, Any] = tf.keras.layers.ConvaD(
            filters=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,strides=SCREAMING_SNAKE_CASE__ ,padding='VALID' ,groups=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution' ,)
        __lowerCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization')
        __lowerCamelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity

    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]):
        # pad -> conv -> batch-norm -> activation
        __lowerCamelCase : List[Any] = self.convolution(self.padding(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : Union[str, Any] = self.normalization(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = self.activation(SCREAMING_SNAKE_CASE__)
        return hidden_state


# Input stem / embedder (TFRegNetEmbeddings).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : str ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,**SCREAMING_SNAKE_CASE__ : Dict):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = config.num_channels
        __lowerCamelCase : Dict = TFRegNetConvLayer(
            out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='embedder' ,)

    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any]):
        __lowerCamelCase : Optional[int] = shape_list(SCREAMING_SNAKE_CASE__)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        __lowerCamelCase : Optional[int] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 2, 3, 1))
        __lowerCamelCase : List[Any] = self.embedder(SCREAMING_SNAKE_CASE__)
        return hidden_state


# 1x1 conv + BN projection used as a residual shortcut (TFRegNetShortCut).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 2 ,**SCREAMING_SNAKE_CASE__ : Tuple):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = tf.keras.layers.ConvaD(
            filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,strides=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution')
        __lowerCamelCase : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization')

    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : bool = False):
        return self.normalization(self.convolution(SCREAMING_SNAKE_CASE__) ,training=SCREAMING_SNAKE_CASE__)


# Squeeze-and-excite attention block (TFRegNetSELayer).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Any):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler')
        __lowerCamelCase : Dict = [
            tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation='relu' ,name='attention.0'),
            tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation='sigmoid' ,name='attention.2'),
        ]

    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        __lowerCamelCase : Optional[Any] = self.pooler(SCREAMING_SNAKE_CASE__)
        for layer_module in self.attention:
            __lowerCamelCase : Any = layer_module(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = hidden_state * pooled
        return hidden_state


# RegNet "X" residual block: 1x1 -> grouped 3x3 -> 1x1 convs (TFRegNetXLayer).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : List[Any]):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = in_channels != out_channels or stride != 1
        __lowerCamelCase : Union[str, Any] = max(1 ,out_channels // config.groups_width)
        __lowerCamelCase : Dict = (
            TFRegNetShortCut(SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' ,name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        __lowerCamelCase : Optional[int] = [
            TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0'),
            TFRegNetConvLayer(
                SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,activation=config.hidden_act ,name='layer.1'),
            TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=SCREAMING_SNAKE_CASE__ ,name='layer.2'),
        ]
        __lowerCamelCase : Dict = ACTaFN[config.hidden_act]

    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
        __lowerCamelCase : int = hidden_state
        for layer_module in self.layers:
            __lowerCamelCase : List[str] = layer_module(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE__)
        hidden_state += residual
        __lowerCamelCase : int = self.activation(SCREAMING_SNAKE_CASE__)
        return hidden_state


# RegNet "Y" residual block: like X but with a squeeze-excite stage (TFRegNetYLayer).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : List[str]):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = in_channels != out_channels or stride != 1
        __lowerCamelCase : Tuple = max(1 ,out_channels // config.groups_width)
        __lowerCamelCase : int = (
            TFRegNetShortCut(SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' ,name='shortcut')
        )
        __lowerCamelCase : Optional[int] = [
            TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=config.hidden_act ,name='layer.0'),
            TFRegNetConvLayer(
                SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,activation=config.hidden_act ,name='layer.1'),
            TFRegNetSELayer(SCREAMING_SNAKE_CASE__ ,reduced_channels=int(round(in_channels / 4)) ,name='layer.2'),
            TFRegNetConvLayer(SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,activation=SCREAMING_SNAKE_CASE__ ,name='layer.3'),
        ]
        __lowerCamelCase : List[Any] = ACTaFN[config.hidden_act]

    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str):
        __lowerCamelCase : Optional[int] = hidden_state
        for layer_module in self.layers:
            __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = self.shortcut(SCREAMING_SNAKE_CASE__)
        hidden_state += residual
        __lowerCamelCase : Any = self.activation(SCREAMING_SNAKE_CASE__)
        return hidden_state


# A stage: one stride-2 downsampling layer followed by depth-1 layers (TFRegNetStage).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 2 ,SCREAMING_SNAKE_CASE__ : int = 2 ,**SCREAMING_SNAKE_CASE__ : Tuple):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        __lowerCamelCase : Tuple = [
            # downsampling is done in the first layer with stride of 2
            layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,name='layers.0'),
            *[layer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,name=F"layers.{i+1}") for i in range(depth - 1)],
        ]

    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]):
        for layer_module in self.layers:
            __lowerCamelCase : Any = layer_module(SCREAMING_SNAKE_CASE__)
        return hidden_state


# Stack of stages, optionally collecting hidden states (TFRegNetEncoder).
class A_ ( tf.keras.layers.Layer ):

    def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,**SCREAMING_SNAKE_CASE__ : Any):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                SCREAMING_SNAKE_CASE__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='stages.0' ,))
        __lowerCamelCase : Optional[int] = zip(config.hidden_sizes ,config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE__ ,config.depths[1:])):
            self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,depth=SCREAMING_SNAKE_CASE__ ,name=F"stages.{i+1}"))

    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True):
        __lowerCamelCase : Optional[Any] = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                __lowerCamelCase : Optional[Any] = hidden_states + (hidden_state,)
            __lowerCamelCase : str = stage_module(SCREAMING_SNAKE_CASE__)
        if output_hidden_states:
            __lowerCamelCase : Tuple = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__)


# Embedder + encoder + global-average pooler; outputs are transposed back to NCHW
# for uniformity with the PyTorch implementation (TFRegNetMainLayer).
@keras_serializable
class A_ ( tf.keras.layers.Layer ):
    _UpperCAmelCase : List[Any] = RegNetConfig

    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = config
        __lowerCamelCase : Optional[int] = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE__ ,name='embedder')
        __lowerCamelCase : Union[str, Any] = TFRegNetEncoder(SCREAMING_SNAKE_CASE__ ,name='encoder')
        __lowerCamelCase : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE__ ,name='pooler')

    @unpack_inputs
    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,):
        __lowerCamelCase : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : Union[str, Any] = self.embedder(SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = self.encoder(
            SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = encoder_outputs[0]
        __lowerCamelCase : int = self.pooler(SCREAMING_SNAKE_CASE__)
        # Change to NCHW output format have uniformity in the modules
        __lowerCamelCase : Union[str, Any] = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2))
        __lowerCamelCase : str = tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            __lowerCamelCase : Union[str, Any] = tuple([tf.transpose(SCREAMING_SNAKE_CASE__ ,perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)


# Pretrained-model base: config class, prefix, and serving input spec
# (TFRegNetPreTrainedModel; base class was renamed to SCREAMING_SNAKE_CASE).
class A_ ( SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase : Any = RegNetConfig
    _UpperCAmelCase : Optional[int] = '''regnet'''
    _UpperCAmelCase : List[Any] = '''pixel_values'''

    @property
    def lowerCAmelCase ( self : int):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.floataa)}


a =r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

a =r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Bare backbone model (TFRegNetModel).
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):

    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Tuple):
        super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : int=False ,):
        __lowerCamelCase : List[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : Tuple = self.regnet(
            pixel_values=SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__ ,)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)


# Backbone + linear classification head (TFRegNetForImageClassification).
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):

    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : str):
        super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = config.num_labels
        __lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet')
        # classification head
        __lowerCamelCase : Optional[Any] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels ,name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Any=False ,):
        __lowerCamelCase : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : str = self.regnet(
            SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
        __lowerCamelCase : Optional[Any] = self.classifier[0](SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = self.classifier[1](SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__)
        if not return_dict:
            __lowerCamelCase : Union[str, Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states)
652
0
# NOTE(review): flattened chunk — ALIGN model configuration classes. The rewrite
# renamed all three classes (and their base, presumably PretrainedConfig per the
# import) to `A__`, and collapsed distinct keyword parameters to the same
# `lowerCamelCase__` name (invalid duplicates), while bodies still read the
# original parameter names. Code preserved token-for-token; comments only.
import copy
import os
from typing import TYPE_CHECKING, List, Union

if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase : Tuple = logging.get_logger(__name__)

UpperCamelCase : str = {
    """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}


# BERT-like text-tower configuration (originally AlignTextConfig).
class A__ ( A__ ):
    """simple docstring"""

    _lowercase = 'align_text_model'

    def __init__( self : Dict , lowerCamelCase__ : Dict=30_522 , lowerCamelCase__ : Optional[int]=768 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : List[str]=3_072 , lowerCamelCase__ : Union[str, Any]="gelu" , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Any=512 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Optional[int]=0.02 , lowerCamelCase__ : Optional[Any]=1E-12 , lowerCamelCase__ : Dict=0 , lowerCamelCase__ : Optional[int]="absolute" , lowerCamelCase__ : List[Any]=True , **lowerCamelCase__ : Optional[Any] , ):
        super().__init__(**lowerCamelCase__ )
        a__ : Optional[Any] = vocab_size
        a__ : Optional[int] = hidden_size
        a__ : str = num_hidden_layers
        a__ : int = num_attention_heads
        a__ : Any = hidden_act
        a__ : str = intermediate_size
        a__ : List[Any] = hidden_dropout_prob
        a__ : str = attention_probs_dropout_prob
        a__ : List[Any] = max_position_embeddings
        a__ : List[Any] = type_vocab_size
        a__ : Optional[int] = initializer_range
        a__ : int = layer_norm_eps
        a__ : Optional[Any] = position_embedding_type
        a__ : Dict = use_cache
        a__ : Optional[int] = pad_token_id

    @classmethod
    def _UpperCamelCase( cls : int , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : List[str] ):
        cls._set_token_in_kwargs(lowerCamelCase__ )
        a__, a__ : Dict = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__ )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            a__ : int = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCamelCase__ , **lowerCamelCase__ )


# EfficientNet-like vision-tower configuration (originally AlignVisionConfig).
class A__ ( A__ ):
    """simple docstring"""

    _lowercase = 'align_vision_model'

    def __init__( self : List[Any] , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 600 , lowerCamelCase__ : float = 2.0 , lowerCamelCase__ : float = 3.1 , lowerCamelCase__ : int = 8 , lowerCamelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase__ : List[int] = [] , lowerCamelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase__ : float = 0.25 , lowerCamelCase__ : str = "swish" , lowerCamelCase__ : int = 2_560 , lowerCamelCase__ : str = "mean" , lowerCamelCase__ : float = 0.02 , lowerCamelCase__ : float = 0.001 , lowerCamelCase__ : float = 0.99 , lowerCamelCase__ : float = 0.2 , **lowerCamelCase__ : List[str] , ):
        super().__init__(**lowerCamelCase__ )
        a__ : Union[str, Any] = num_channels
        a__ : List[Any] = image_size
        a__ : int = width_coefficient
        a__ : int = depth_coefficient
        a__ : Union[str, Any] = depth_divisor
        a__ : Optional[Any] = kernel_sizes
        a__ : Union[str, Any] = in_channels
        a__ : Optional[Any] = out_channels
        a__ : Any = depthwise_padding
        a__ : Any = strides
        a__ : int = num_block_repeats
        a__ : List[Any] = expand_ratios
        a__ : int = squeeze_expansion_ratio
        a__ : Optional[Any] = hidden_act
        a__ : List[str] = hidden_dim
        a__ : List[str] = pooling_type
        a__ : List[str] = initializer_range
        a__ : Union[str, Any] = batch_norm_eps
        a__ : int = batch_norm_momentum
        a__ : List[str] = drop_connect_rate
        # total hidden layers: each block repeat contributes 4 sub-layers
        a__ : Dict = sum(lowerCamelCase__ ) * 4

    @classmethod
    def _UpperCamelCase( cls : Dict , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : Dict ):
        cls._set_token_in_kwargs(lowerCamelCase__ )
        a__, a__ : Union[str, Any] = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__ )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            a__ : List[str] = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCamelCase__ , **lowerCamelCase__ )


# Combined two-tower configuration (originally AlignConfig).
class A__ ( A__ ):
    """simple docstring"""

    _lowercase = 'align'
    _lowercase = True

    def __init__( self : str , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Dict=640 , lowerCamelCase__ : Tuple=1.0 , lowerCamelCase__ : Any=0.02 , **lowerCamelCase__ : Optional[int] , ):
        super().__init__(**lowerCamelCase__ )
        if text_config is None:
            a__ : Optional[int] = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
        if vision_config is None:
            a__ : Union[str, Any] = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
        a__ : Optional[Any] = AlignTextConfig(**lowerCamelCase__ )
        a__ : str = AlignVisionConfig(**lowerCamelCase__ )
        a__ : Dict = projection_dim
        a__ : List[str] = temperature_init_value
        a__ : Dict = initializer_range

    @classmethod
    def _UpperCamelCase( cls : Tuple , lowerCamelCase__ : AlignTextConfig , lowerCamelCase__ : AlignVisionConfig , **lowerCamelCase__ : List[str] ):
        # alternate constructor from already-built sub-configs
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase__ )

    def _UpperCamelCase( self : Any ):
        # serialize, expanding nested sub-configs to plain dicts
        a__ : int = copy.deepcopy(self.__dict__ )
        a__ : int = self.text_config.to_dict()
        a__ : Tuple = self.vision_config.to_dict()
        a__ : Optional[Any] = self.__class__.model_type
        return output
37
# NOTE(review): flattened chunk — JAX/Flax model-parallel PartitionSpec helpers
# for a GPT-2-style parameter tree. The rewrite renamed both sentinel constants
# to the same `a` and all four functions to `SCREAMING_SNAKE_CASE__` (the later
# defs shadow the earlier ones), while bodies still read the original names
# (qs, ks, rules, _match, _unmatched, ...). Preserved as-is; comments only.
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P

# Sentinels
a =object()

# For specifying empty leaf dict `{}`
a =object()


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    # Originally `_match(qs, ks)`: True when the tuple of regexes `qs` matches a
    # contiguous window of the key-path tuple `ks` (each pattern anchored with $).
    __lowerCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) )
    for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ):
        __lowerCamelCase : str = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )]
        if matches and all(lowerCamelCase__ ):
            return True
    return False


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    # Originally `_replacement_rules(rules)`: close over the (pattern, spec)
    # pairs and return a (key, val) -> spec mapper.
    def replace(lowerCamelCase__ , lowerCamelCase__ ):
        for rule, replacement in rules:
            if _match(lowerCamelCase__ , lowerCamelCase__ ):
                return replacement
        return val

    return replace


def SCREAMING_SNAKE_CASE__ ( ) -> str:
    # Originally `_get_partition_rules()`: 'mp' marks the model-parallel axis.
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , lowerCamelCase__ )),
        (("transformer", "wte", "embedding"), P('mp' , lowerCamelCase__ )),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase__ , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , lowerCamelCase__ )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(lowerCamelCase__ , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , lowerCamelCase__ )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    # Originally `set_partitions(in_dict)`: assign a PartitionSpec to every
    # parameter leaf; any leaf left at the `_unmatched` sentinel is an error.
    __lowerCamelCase : List[str] = _get_partition_rules()
    __lowerCamelCase : Optional[Any] = _replacement_rules(lowerCamelCase__ )
    __lowerCamelCase : Tuple = {k: _unmatched for k in flatten_dict(lowerCamelCase__ )}
    __lowerCamelCase : List[Any] = {k: replace(lowerCamelCase__ , lowerCamelCase__ ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(lowerCamelCase__ ) )
652
0
'''simple docstring''' A_ : Union[str, Any] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A_ : Tuple = { 0: "Sunday", 1: "Monday", 2: "Tuesday", 3: "Wednesday", 4: "Thursday", 5: "Friday", 6: "Saturday", } def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> str: '''simple docstring''' assert len(str(__magic_name__ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: snake_case__ : Optional[Any] = year // 1_00 snake_case__ : int = (5 * (century % 4) + 2) % 7 snake_case__ : Optional[Any] = year % 1_00 snake_case__ : List[str] = centurian % 12 snake_case__ : Tuple = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 snake_case__ : List[Any] = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) snake_case__ : Optional[Any] = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
38
import math


def prime_sieve(n: int) -> list:
    """Return all primes strictly below ``n`` (odd-only sieve of Eratosthenes)."""
    if n < 3:  # guard: the index assignments below need at least 3 slots
        return []
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Mark odd multiples of every odd i; even indices are never read back.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of semidivisible numbers not exceeding ``limit``.

    For lps(n) = largest prime <= sqrt(n) and ups(n) = smallest prime >= sqrt(n),
    n is semidivisible when exactly one of lps(n), ups(n) divides n.  For each
    consecutive prime pair (p, q) the candidates lie in (p**2, q**2): add the
    multiples of p from below, the multiples of q from above, then subtract
    twice the numbers divisible by both (counted once by each pass).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Pull upper_bound back under the limit before walking down.
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Step to the first common multiple past the square.
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both passes above.
            matches_sum -= current * 2
            current += last_prime * next_prime

        # Setup for next prime pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
652
0
import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase_ = logging.getLogger(__name__) class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = "masked_bert" def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=3_0_5_2_2 , _UpperCamelCase : str=7_6_8 , _UpperCamelCase : Tuple=1_2 , _UpperCamelCase : List[Any]=1_2 , _UpperCamelCase : Union[str, Any]=3_0_7_2 , _UpperCamelCase : Optional[int]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Optional[int]=5_1_2 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : int=1e-12 , _UpperCamelCase : List[str]=0 , _UpperCamelCase : int="topK" , _UpperCamelCase : Dict="constant" , _UpperCamelCase : Tuple=0.0 , **_UpperCamelCase : Any , ) ->Dict: super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = pruning_method snake_case_ = mask_init snake_case_ = mask_scale
39
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of `step_pred`: the next sample and the pre-noise mean."""

    # NOTE: name restored — `step_pred` below constructs `SdeVeOutput(...)`.
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class A_(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler from the score_sde paper.

    Noise levels grow geometrically from ``sigma_min`` to ``sigma_max``;
    sampling alternates predictor (`step_pred`) and corrector (`step_correct`)
    updates.  NOTE(review): this is presumably diffusers' ScoreSdeVeScheduler;
    the scrambled public class name is kept for compatibility.
    """

    # Solver order expected by the diffusers scheduler API.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op; present for interchangeability with other schedulers."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Set the continuous timestep grid from 1 down to ``sampling_eps``."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Precompute the geometric noise schedule used during sampling."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(
            torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps)
        )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step; zero at the first step."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor update: one reverse-SDE (ancestral sampling) step."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector update: one Langevin MCMC step with SNR-derived step size."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        """Perturb ``original_samples`` with the noise level of ``timesteps``."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
652
0
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Two-pointer check: walk inward from both ends until a mismatch."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Compare each of the first half's characters to its mirror."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Recursive check: strip matching outer characters."""
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Slice check: a palindrome equals its reverse."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    """Time ``name`` over all of ``test_data`` and print the result."""
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
40
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> np.ndarray: return input_array.reshape((input_array.size, 1) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : str = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : int = features[:, labels == i] __lowerCamelCase : Optional[int] = data.mean(1 ) # Centralize the data of class i __lowerCamelCase : int = data - column_reshape(lowerCamelCase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowerCamelCase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) __lowerCamelCase : Union[str, Any] = np.dot(lowerCamelCase__ , centered_data.T ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = features.mean(1 ) __lowerCamelCase : Union[str, Any] = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = features[:, labels == i] __lowerCamelCase : Union[str, Any] = data.shape[1] __lowerCamelCase : Union[str, Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) __lowerCamelCase : List[str] = device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: # Check if the features have been loaded if features.any(): __lowerCamelCase : Tuple = features.mean(1 ) # Center the dataset __lowerCamelCase : Any = features - np.reshape(lowerCamelCase__ , (data_mean.size, 1) ) __lowerCamelCase : Optional[int] = np.dot(lowerCamelCase__ , centered_data.T ) / features.shape[1] __lowerCamelCase , __lowerCamelCase : List[Any] = np.linalg.eigh(lowerCamelCase__ ) # Take all the columns in the reverse order (-1), and then takes only the first __lowerCamelCase : Dict = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space __lowerCamelCase : int = np.dot(filtered_eigenvectors.T , lowerCamelCase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: assert classes > dimensions # Check if features have been already loaded if features.any: __lowerCamelCase , __lowerCamelCase : Dict = eigh( covariance_between_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , covariance_within_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , ) __lowerCamelCase : Union[str, Any] = eigenvectors[:, ::-1][:, :dimensions] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = np.linalg.svd(lowerCamelCase__ ) __lowerCamelCase : int = svd_matrix[:, 0:dimensions] __lowerCamelCase : Optional[int] = np.dot(filtered_svd_matrix.T , lowerCamelCase__ ) logging.info('Linear 
Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: # Create dummy dataset with 2 classes and 3 features __lowerCamelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) __lowerCamelCase : Optional[int] = np.array([0, 0, 0, 1, 1] ) __lowerCamelCase : Optional[Any] = 2 __lowerCamelCase : Tuple = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : int = linear_discriminant_analysis( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if isinstance(lowerCamelCase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) __lowerCamelCase : Dict = 2 __lowerCamelCase : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : Optional[Any] = principal_component_analysis(lowerCamelCase__ , lowerCamelCase__ ) if not np.allclose(lowerCamelCase__ , lowerCamelCase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
652
0
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                         references=reference,
        ...                         word_order=2,
        ...                         lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_(datasets.Metric):
    """ChrF(++) metric: a thin wrapper around sacrebleu's CHRF scorer."""

    # NOTE: `_info`/`_compute` names restored — `datasets.Metric` invokes
    # them internally; the scrambled source bound both to one colliding name.
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        # Every prediction must come with the same number of references.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # Transpose [pred][ref_i] -> [ref_i][pred], the layout sacrebleu expects.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
41
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow

# Name restored: `analyze_directory` below logs through `logger`.
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    """Run doctests over transformers source/docs directories.

    NOTE(review): test-method names were scrambled in the source; they are
    restored to descriptive `test_*` names (unittest discovers only `test_*`
    methods) — confirm against the original file.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in ``directory`` matching the filters.

        ``identifier`` keeps only filenames containing it; ``n_identifier``
        (one string or a list) drops filenames containing it; ``ignore_files``
        are skipped outright.  With ``only_modules`` the file is resolved as a
        transformers attribute and its docstrings are run as a DocTestSuite;
        otherwise ``doctest.testfile`` runs the file as documentation.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_source_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_documentation(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
652
0
"""TensorFlow activation functions and the ACT2FN name-to-callable registry."""

import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact GELU: x * Phi(x) via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh-approximated GELU (the "new" GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with the output clipped to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated linear unit: split ``x`` in two along ``axis``, gate with sigmoid."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    # Keras ships an `approximate=` GELU from TF 2.4 on; use it directly.
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation callable by name; raise KeyError for unknown names."""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
42
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ="""▁""" a ={"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} a ={ """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } a ={"""vinai/bartpho-syllable""": 1024} class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES _UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : Dict="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,): # Mask token behave like a normal word, i.e. 
include the space before it __lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token __lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : int = vocab_file __lowerCamelCase : Tuple = monolingual_vocab_file __lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__)) # Load the reduced vocab # Keep order of special tokens for backward compatibility __lowerCamelCase : Optional[int] = {} __lowerCamelCase : List[Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids: __lowerCamelCase : Any = cnt cnt += 1 with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f: for line in f.readlines(): __lowerCamelCase : Any = line.strip().split()[0] __lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids) if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids: __lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids) __lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int): __lowerCamelCase : Tuple = self.__dict__.copy() __lowerCamelCase : Optional[int] = None __lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : List[str] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs'): 
__lowerCamelCase : str = {} __lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase : Tuple = [self.cls_token_id] __lowerCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): __lowerCamelCase : Dict = [self.sep_token_id] __lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def lowerCAmelCase ( self : List[str]): return len(self.fairseq_ids_to_tokens) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str): return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): if token in self.fairseq_tokens_to_ids: 
return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict): return self.fairseq_ids_to_tokens[index] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Any = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip() return out_string def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if not os.path.isdir(SCREAMING_SNAKE_CASE__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi: __lowerCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( SCREAMING_SNAKE_CASE__) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.monolingual_vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"{str(SCREAMING_SNAKE_CASE__)} \n") return out_vocab_file, out_monolingual_vocab_file
652
0
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class _a ( UpperCamelCase__ , UpperCamelCase__ ): _lowercase : int = '''pixel_values''' _lowercase : Optional[int] = False _lowercase : Optional[Any] = TimmBackboneConfig def __init__( self: Union[str, Any] , UpperCamelCase_: Tuple , **UpperCamelCase_: Optional[int] ) -> Optional[int]: """simple docstring""" requires_backends(self , '''timm''' ) super().__init__(UpperCamelCase_ ) lowercase__ = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(f'backbone {config.backbone} is not supported by timm.' ) if hasattr(UpperCamelCase_ , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) lowercase__ = getattr(UpperCamelCase_ , '''use_pretrained_backbone''' , UpperCamelCase_ ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. lowercase__ = config.out_indices if getattr(UpperCamelCase_ , '''out_indices''' , UpperCamelCase_ ) is not None else (-1,) lowercase__ = timm.create_model( config.backbone , pretrained=UpperCamelCase_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCamelCase_ , **UpperCamelCase_ , ) # These are used to control the output of the model when called. 
If output_hidden_states is True, then # return_layers is modified to include all layers. lowercase__ = self._backbone.return_layers lowercase__ = {layer['''module''']: str(UpperCamelCase_ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(UpperCamelCase_ ) @classmethod def lowerCamelCase_ ( cls: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: List[str] , **UpperCamelCase_: Optional[Any] ) -> int: """simple docstring""" requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig lowercase__ = kwargs.pop('''config''' , TimmBackboneConfig() ) lowercase__ = kwargs.pop('''use_timm_backbone''' , UpperCamelCase_ ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) lowercase__ = kwargs.pop('''num_channels''' , config.num_channels ) lowercase__ = kwargs.pop('''features_only''' , config.features_only ) lowercase__ = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) lowercase__ = kwargs.pop('''out_indices''' , config.out_indices ) lowercase__ = TimmBackboneConfig( backbone=UpperCamelCase_ , num_channels=UpperCamelCase_ , features_only=UpperCamelCase_ , use_pretrained_backbone=UpperCamelCase_ , out_indices=UpperCamelCase_ , ) return super()._from_config(UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase_ ( self: int , UpperCamelCase_: List[Any] ) -> Any: """simple docstring""" pass def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Tuple=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[int] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: """simple docstring""" lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ = output_attentions if output_attentions is not None else 
self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone lowercase__ = self._all_layers lowercase__ = self._backbone(UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = self._return_layers lowercase__ = tuple(hidden_states[i] for i in self.out_indices ) else: lowercase__ = self._backbone(UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = None lowercase__ = tuple(UpperCamelCase_ ) lowercase__ = tuple(UpperCamelCase_ ) if hidden_states is not None else None if not return_dict: lowercase__ = (feature_maps,) if output_hidden_states: lowercase__ = output + (hidden_states,) return output return BackboneOutput(feature_maps=UpperCamelCase_ , hidden_states=UpperCamelCase_ , attentions=UpperCamelCase_ )
43
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=1_3 ,SCREAMING_SNAKE_CASE__ : int=3_0 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : List[str]=3_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,): __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Optional[Any] = batch_size __lowerCamelCase : Dict = image_size __lowerCamelCase : Optional[Any] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : str = is_training __lowerCamelCase : List[Any] = use_labels __lowerCamelCase : Any = hidden_size __lowerCamelCase : Optional[int] = 
num_hidden_layers __lowerCamelCase : Any = num_attention_heads __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : Dict = hidden_act __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : List[Any] = attention_probs_dropout_prob __lowerCamelCase : Dict = type_sequence_label_size __lowerCamelCase : Optional[Any] = initializer_range __lowerCamelCase : List[str] = scope __lowerCamelCase : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : str = num_patches + 2 def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __lowerCamelCase : List[Any] = None if self.use_labels: __lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : List[Any]): return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict): __lowerCamelCase : Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size)) def 
lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any): __lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __lowerCamelCase : int = 1 __lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size)) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): __lowerCamelCase : Dict = self.type_sequence_label_size __lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) # test greyscale images __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Tuple = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf 
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _UpperCAmelCase : List[Any] = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[int] = False def lowerCAmelCase ( self : Any): __lowerCamelCase : str = TFDeiTModelTester(self) __lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7) def lowerCAmelCase ( self : str): self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Dict): __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer)) __lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense)) def lowerCAmelCase ( self : List[Any]): __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Any = [*signature.parameters.keys()] __lowerCamelCase : Union[str, Any] = 
['pixel_values'] self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str=False): __lowerCamelCase : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase ( self : Optional[int]): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self : List[Any]): return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') __lowerCamelCase : int = self.default_image_processor __lowerCamelCase : Tuple = prepare_img() 
__lowerCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf') # forward pass __lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__) # verify the logits __lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
652
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase_ : str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase_ : str = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } UpperCAmelCase_ : int = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off UpperCAmelCase_ : List[str] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 
'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class UpperCAmelCase__ ( A ): lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = ['input_ids', 'attention_mask'] lowerCAmelCase_ = NllbTokenizer lowerCAmelCase_ = [] lowerCAmelCase_ = [] def __init__( self : Dict,__A : Any=None,__A : List[Any]=None,__A : List[str]="<s>",__A : Union[str, Any]="</s>",__A : Optional[int]="</s>",__A : str="<s>",__A : List[Any]="<unk>",__A : 
Any="<pad>",__A : List[str]="<mask>",__A : Any=None,__A : List[Any]=None,__A : Union[str, Any]=None,__A : Dict=False,**__A : Union[str, Any],): # Mask token behave like a normal word, i.e. include the space before it _lowerCamelCase : Optional[int] = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else mask_token _lowerCamelCase : Tuple = legacy_behaviour super().__init__( vocab_file=__A,tokenizer_file=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,src_lang=__A,tgt_lang=__A,additional_special_tokens=__A,legacy_behaviour=__A,**__A,) _lowerCamelCase : Dict = vocab_file _lowerCamelCase : int = False if not self.vocab_file else True _lowerCamelCase : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) _lowerCamelCase : Dict = { lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowerCamelCase : Union[str, Any] = src_lang if src_lang is not None else "eng_Latn" _lowerCamelCase : Optional[int] = self.convert_tokens_to_ids(self._src_lang ) _lowerCamelCase : str = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCamelCase_ ( self : Optional[int] ): return self._src_lang @src_lang.setter def lowerCamelCase_ ( self : List[str],__A : str ): _lowerCamelCase : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens 
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ): _lowerCamelCase : Optional[int] = [self.sep_token_id] _lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str,__A : Tuple,__A : str,__A : Optional[str],__A : Optional[str],**__A : str ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) _lowerCamelCase : int = src_lang _lowerCamelCase : Optional[int] = self(__A,add_special_tokens=__A,return_tensors=__A,**__A ) _lowerCamelCase : Optional[Any] = self.convert_tokens_to_ids(__A ) _lowerCamelCase : Any = tgt_lang_id return inputs def lowerCamelCase_ ( self : int,__A : List[str],__A : str = "eng_Latn",__A : Optional[List[str]] = None,__A : str = "fra_Latn",**__A : Tuple,): _lowerCamelCase : Any = src_lang _lowerCamelCase : Tuple = tgt_lang return super().prepare_seqaseq_batch(__A,__A,**__A ) def lowerCamelCase_ ( self : List[Any] ): return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase_ ( self : Optional[Any] ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase_ ( self : Any,__A : str ): _lowerCamelCase : int = self.convert_tokens_to_ids(__A ) if self.legacy_behaviour: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Dict = [self.eos_token_id, self.cur_lang_code] else: _lowerCamelCase : Optional[int] = [self.cur_lang_code] _lowerCamelCase : Any = [self.eos_token_id] _lowerCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCamelCase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCamelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str,self.prefix_tokens + self.suffix_tokens ) ),) def lowerCamelCase_ ( self : List[str],__A : str ): _lowerCamelCase : Dict = self.convert_tokens_to_ids(__A ) if self.legacy_behaviour: _lowerCamelCase : Dict = [] _lowerCamelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] else: _lowerCamelCase : List[Any] = [self.cur_lang_code] _lowerCamelCase : Tuple = [self.eos_token_id] _lowerCamelCase : int = self.convert_ids_to_tokens(self.prefix_tokens ) _lowerCamelCase : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) _lowerCamelCase : List[str] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str,pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str,self.prefix_tokens + self.suffix_tokens ) ),) def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return _lowerCamelCase : List[str] = os.path.join( __A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file,__A ) return (out_vocab_file,)
44
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class A_(BaseOutput):
    """Output of the semantic Stable Diffusion pipeline.

    Attributes:
        images: the denoised images, as a list of PIL images or a numpy array.
        nsfw_content_detected: per-image safety-checker flags, or None when the
            safety checker is disabled.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


# The pipeline needs both transformers and torch; only expose it when both are present.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
652
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True if `graph` is bipartite, else False.

    `graph` is an adjacency-list dict mapping each vertex 0..n-1 to its
    neighbor list. A DFS 2-colors every connected component, then every edge
    is checked for a same-color endpoint pair (which also rejects self-loops).
    """
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # Color every component (isolated vertices get color 0).
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Verify no edge joins two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
45
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
652
0
"""PyTorch MRA model tests: output-shape checks on a tiny random config plus
slow integration tests against the released uw-madison checkpoints."""

import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    """Builds a tiny ``MraConfig`` with random inputs and asserts the output
    shapes of the base model and each task head."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        # Random ids/mask/labels of shape (batch_size, seq_length); label
        # tensors are only produced when use_labels is set.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Pipeline tests need a larger vocabulary than the unit-test default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension: (batch, choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # MRA supports neither head pruning/masking nor torchscript export, and it
    # does not return attention probabilities.
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
46
"""Tokenizer for the CPM-Ant models: jieba word segmentation followed by a
greedy longest-match WordPiece pass over a plain-text vocabulary."""

import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into an ordered
    token -> index dictionary (index = line number)."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab


class WordpieceTokenizer:
    """Greedy longest-match-first subword tokenizer over a fixed vocab."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped straight to the unk token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split one word into the longest vocab pieces, left to right;
        characters that match nothing become the unk token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry matches.
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map literal space/newline characters onto the special placeholder
        # tokens so plain text round-trips through the vocab.
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then WordPiece each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        # Drop padding sentinels (< 0) and special ids before decoding.
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary back to disk, restoring the placeholder tokens
        for space/newline; warns if indices are not consecutive."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
            )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!'
                    )
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: List[int] = None):
        """Prefix every sequence with the BOS token."""
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return 1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=token_ids_a, token_ids_b=token_ids_b, already_has_special_tokens=True
            )

        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b))
        return [1] + ([0] * len(token_ids_a))
652
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check google/mt5-small's loss on a fixed (input, label) pair against
        a frozen reference score (sequence-level negative log-likelihood)."""
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # loss is the mean per-token NLL; multiply by the label length to get
        # the sequence score, negated to match the reference convention.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
47
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map submodule name -> public names it exports; the modeling entries are only
# registered when torch is installed.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
652
0
"""Greedily pack a seq2seq dataset into longer examples.

Reads ``{split}.source``/``{split}.target`` line files, concatenates adjacent
examples while both sides stay within ``--max_seq_len`` tokens, writes the
packed train split, and copies val/test through unchanged.
"""

import argparse
import shutil
from pathlib import Path

try:
    from tqdm import tqdm
except ImportError:  # keep the packing helpers usable without tqdm installed
    def tqdm(iterable, *args, **kwargs):
        return iterable


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Merge consecutive (src, tgt) pairs while both merged sides stay within
    `max_tokens` according to `tok`; return (packed_src, packed_tgt) lists."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the last partially-filled example
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the train split of `data_dir` into `save_path`; val/test are copied
    verbatim so evaluation stays comparable."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    """CLI entry point: parse args, load the tokenizer, pack the data dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    # Imported lazily so the packing helpers above stay importable without transformers.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
48
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): identifiers in this file look machine-mangled — several values
# are assigned to `__lowerCamelCase` but then read under their original names
# (`config`, `scheduler_class`, `scheduler`, `model`, `sample`, ...). The
# intent of each method is documented below; the broken bindings should be
# restored before this test can run.
class A_ ( SCREAMING_SNAKE_CASE ):
    """Scheduler test-suite specialization for UnCLIPScheduler."""

    # Scheduler classes exercised by the common test machinery.
    _UpperCAmelCase : int = (UnCLIPScheduler,)

    def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
        """Return the default scheduler config, overridden by any kwargs."""
        __lowerCamelCase : Any = {
            'num_train_timesteps': 1_0_0_0,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        # NOTE(review): `config` is never bound — the dict above was presumably
        # named `config` originally.
        config.update(**SCREAMING_SNAKE_CASE__)
        return config

    def lowerCAmelCase ( self : Optional[Any]):
        # Config sweep over the number of training timesteps.
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Optional[Any]):
        # Config sweep over both supported variance types.
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Union[str, Any]):
        # Config sweep over sample clipping on/off.
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Tuple):
        # Config sweep over the clip range magnitude.
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : List[Any]):
        # Config sweep over prediction targets.
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Dict):
        # Forward sweep over (timestep, prev_timestep) pairs; prev must precede t.
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Optional[int]):
        """Pin expected variances for the 'fixed_small_log' variance type."""
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log')
        __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__)
        # Reference values at timesteps 0, 487, 999.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5

    def lowerCAmelCase ( self : Any):
        """Pin expected (log-space) variances for the 'learned_range' type."""
        __lowerCamelCase : Dict = self.scheduler_classes[0]
        __lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range')
        __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = 0.5
        assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5
        assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5
        assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5

    def lowerCAmelCase ( self : List[str]):
        """Run a full denoising loop over all timesteps and pin the result stats."""
        __lowerCamelCase : str = self.scheduler_classes[0]
        __lowerCamelCase : str = self.get_scheduler_config()
        __lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = scheduler.timesteps
        __lowerCamelCase : Union[str, Any] = self.dummy_model()
        __lowerCamelCase : Optional[Any] = self.dummy_sample_deter
        __lowerCamelCase : List[str] = torch.manual_seed(0)
        for i, t in enumerate(SCREAMING_SNAKE_CASE__):
            # 1. predict noise residual
            __lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            # 2. predict previous mean of sample x_t-1
            __lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
            __lowerCamelCase : Optional[Any] = pred_prev_sample
        __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
        # Regression reference values for the final sample.
        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3

    def lowerCAmelCase ( self : str):
        """Same full loop but with a truncated 25-step schedule and explicit prev_timestep."""
        __lowerCamelCase : str = self.scheduler_classes[0]
        __lowerCamelCase : List[Any] = self.get_scheduler_config()
        __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
        scheduler.set_timesteps(2_5)
        __lowerCamelCase : int = scheduler.timesteps
        __lowerCamelCase : Tuple = self.dummy_model()
        __lowerCamelCase : Any = self.dummy_sample_deter
        __lowerCamelCase : Any = torch.manual_seed(0)
        for i, t in enumerate(SCREAMING_SNAKE_CASE__):
            # 1. predict noise residual
            __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            if i + 1 == timesteps.shape[0]:
                # Last step has no previous timestep.
                __lowerCamelCase : Optional[Any] = None
            else:
                __lowerCamelCase : Union[str, Any] = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            __lowerCamelCase : int = scheduler.step(
                SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
            __lowerCamelCase : Union[str, Any] = pred_prev_sample
        __lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3

    def lowerCAmelCase ( self : List[Any]):
        # Intentionally not implemented for this scheduler (common-suite hook).
        pass

    def lowerCAmelCase ( self : Union[str, Any]):
        # Intentionally not implemented for this scheduler (common-suite hook).
        pass
652
0
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _lowercase : Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : str = ["pixel_values"] def __init__( self : Optional[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _lowercase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_lowercase : List[Any] , ): super().__init__(**_lowercase ) __UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_24} __UpperCAmelCase = get_size_dict(_lowercase , default_to_square=_lowercase ) __UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __UpperCAmelCase = get_size_dict(_lowercase , param_name='''crop_size''' ) __UpperCAmelCase = do_resize __UpperCAmelCase = size __UpperCAmelCase = resample __UpperCAmelCase = do_center_crop __UpperCAmelCase = crop_size __UpperCAmelCase = do_rescale __UpperCAmelCase = rescale_factor __UpperCAmelCase = do_normalize __UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a ( self : Dict , _lowercase : np.ndarray , 
_lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ): __UpperCAmelCase = get_size_dict(_lowercase , default_to_square=_lowercase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: __UpperCAmelCase = int((2_56 / 2_24) * size['''shortest_edge'''] ) __UpperCAmelCase = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase ) __UpperCAmelCase = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowercase , size=(size_dict['''height'''], size_dict['''width''']) , resample=_lowercase , data_format=_lowercase , **_lowercase ) def a ( self : Union[str, Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Dict , ): __UpperCAmelCase = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowercase , size=(size['''height'''], size['''width''']) , data_format=_lowercase , **_lowercase ) def a ( self : str , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ): return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase ) def a ( self : Union[str, Any] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Dict , ): return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase ) def a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Dict[str, int]] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, Iterable[float]]] = None , _lowercase : Optional[Union[float, Iterable[float]]] = None , _lowercase : Optional[TensorType] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Optional[Any] , ): __UpperCAmelCase = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase = resample if resample is not None else self.resample __UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase = image_std if image_std is not None else self.image_std 
__UpperCAmelCase = size if size is not None else self.size __UpperCAmelCase = get_size_dict(_lowercase , default_to_square=_lowercase ) __UpperCAmelCase = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase = get_size_dict(_lowercase , param_name='''crop_size''' ) __UpperCAmelCase = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __UpperCAmelCase = [to_numpy_array(_lowercase ) for image in images] if do_resize: __UpperCAmelCase = [self.resize(_lowercase , _lowercase , _lowercase ) for image in images] if do_center_crop: __UpperCAmelCase = [self.center_crop(_lowercase , _lowercase ) for image in images] if do_rescale: __UpperCAmelCase = [self.rescale(_lowercase , _lowercase ) for image in images] if do_normalize: __UpperCAmelCase = [self.normalize(_lowercase , _lowercase , _lowercase ) for image in images] __UpperCAmelCase = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images] __UpperCAmelCase = {'''pixel_values''': images} return BatchFeature(data=_lowercase , tensor_type=_lowercase )
49
from ...configuration_utils import PretrainedConfig
from ...utils import logging


a =logging.get_logger(__name__)

# NOTE(review): this rebinds `a`, shadowing the logger created above —
# presumably the obfuscation collapsed two distinct module names into one.
a ={
    """caidas/swin2sr-classicalsr-x2-64""": (
        """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
    ),
}


class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration class for a Swin2SR super-resolution model.

    Stores the architecture hyperparameters (patching, embedding, attention
    layout, upsampling) used to instantiate the model.
    """

    # Model type identifier used by the auto-config machinery.
    _UpperCAmelCase : Optional[int] = '''swin2sr'''
    # Maps common config attribute names to this model's field names.
    _UpperCAmelCase : Any = {
        '''hidden_size''': '''embed_dim''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=1_8_0 ,SCREAMING_SNAKE_CASE__ : Any=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : int=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : int="1conv" ,SCREAMING_SNAKE_CASE__ : Optional[int]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
        """Store hyperparameters; extra kwargs are forwarded to the base config."""
        super().__init__(**SCREAMING_SNAKE_CASE__)
        # NOTE(review): the RHS names below (image_size, patch_size, ...) do not
        # match the mangled parameter names — original parameter names were
        # presumably these; confirm against upstream before running.
        __lowerCamelCase : Dict = image_size
        __lowerCamelCase : str = patch_size
        __lowerCamelCase : List[Any] = num_channels
        __lowerCamelCase : Dict = embed_dim
        __lowerCamelCase : Dict = depths
        # Number of stages is derived from the depths list.
        __lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = num_heads
        __lowerCamelCase : Tuple = window_size
        __lowerCamelCase : Dict = mlp_ratio
        __lowerCamelCase : str = qkv_bias
        __lowerCamelCase : Optional[int] = hidden_dropout_prob
        __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
        __lowerCamelCase : List[Any] = drop_path_rate
        __lowerCamelCase : Optional[int] = hidden_act
        __lowerCamelCase : Dict = use_absolute_embeddings
        __lowerCamelCase : Optional[Any] = layer_norm_eps
        __lowerCamelCase : str = initializer_range
        __lowerCamelCase : List[Any] = upscale
        __lowerCamelCase : List[Any] = img_range
        __lowerCamelCase : List[str] = resi_connection
        __lowerCamelCase : Union[str, Any] = upsampler
652
0
"""Tests for TextStreamer / TextIteratorStreamer: streamed generation output
must match non-streamed greedy decoding, and timeouts must raise."""
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class UpperCamelCase__ (unittest.TestCase ):
    """Streamer behavior tests against a tiny random GPT-2."""

    def UpperCamelCase_ ( self ):
        """TextStreamer prints exactly the greedy-decoded text (minus final newline)."""
        lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase )
        lowerCamelCase__ = -1
        lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            lowerCamelCase__ = TextStreamer(_lowerCAmelCase )
            model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ,streamer=_lowerCAmelCase )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase__ = cs.out[:-1]
        self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        """TextIteratorStreamer yields the same text via a background thread."""
        lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase )
        lowerCamelCase__ = -1
        lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase )
        lowerCamelCase__ = tokenizer.decode(greedy_ids[0] )
        lowerCamelCase__ = TextIteratorStreamer(_lowerCAmelCase )
        lowerCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        # Generation runs in a thread so the main thread can consume the stream.
        lowerCamelCase__ = Thread(target=model.generate ,kwargs=_lowerCAmelCase )
        thread.start()
        lowerCamelCase__ = """"""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        """skip_prompt=True streams only the newly generated continuation."""
        lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase )
        lowerCamelCase__ = -1
        lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        lowerCamelCase__ = model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase )
        # Strip the prompt tokens: compare only the continuation.
        lowerCamelCase__ = greedy_ids[:, input_ids.shape[1] :]
        lowerCamelCase__ = tokenizer.decode(new_greedy_ids[0] )
        with CaptureStdout() as cs:
            lowerCamelCase__ = TextStreamer(_lowerCAmelCase ,skip_prompt=_lowerCAmelCase )
            model.generate(_lowerCAmelCase ,max_new_tokens=10 ,do_sample=_lowerCAmelCase ,streamer=_lowerCAmelCase )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase__ = cs.out[:-1]
        self.assertEqual(_lowerCAmelCase ,_lowerCAmelCase )

    def UpperCamelCase_ ( self ):
        """decode_kwargs (skip_special_tokens) are forwarded to the tokenizer."""
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        lowerCamelCase__ = AutoTokenizer.from_pretrained("""distilgpt2""" )
        lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(_lowerCAmelCase )
        lowerCamelCase__ = -1
        lowerCamelCase__ = torch.ones((1, 5) ,device=_lowerCAmelCase ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowerCamelCase__ = TextStreamer(_lowerCAmelCase ,skip_special_tokens=_lowerCAmelCase )
            model.generate(_lowerCAmelCase ,max_new_tokens=1 ,do_sample=_lowerCAmelCase ,streamer=_lowerCAmelCase )
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowerCamelCase__ = cs.out[:-1]  # Remove the final "\n"
        lowerCamelCase__ = tokenizer(_lowerCAmelCase ,return_tensors="""pt""" )
        self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )

    def UpperCamelCase_ ( self ):
        """A too-small timeout on the iterator streamer must raise (queue.Empty)."""
        lowerCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        lowerCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_lowerCAmelCase )
        lowerCamelCase__ = -1
        lowerCamelCase__ = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(_lowerCAmelCase )
        lowerCamelCase__ = TextIteratorStreamer(_lowerCAmelCase ,timeout=0.001 )
        lowerCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        lowerCamelCase__ = Thread(target=model.generate ,kwargs=_lowerCAmelCase )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_lowerCAmelCase ):
            lowerCamelCase__ = """"""
            for new_text in streamer:
                streamer_text += new_text
"""Seq2seq (RAG-style) training utilities: tokenization helpers, a line-based
dataset, git/json bookkeeping, and SQuAD-style EM/F1 metrics.

NOTE(review): every top-level function in this module is named
``SCREAMING_SNAKE_CASE__`` — each later ``def`` shadows the earlier ones, and
several bodies reference names (``line``, ``tokenizer``, ``text``, ...) that no
longer match the mangled parameter names. Original names must be restored
before this module is importable/usable.
"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

# NOTE(review): `TaTokenizer` is likely a mangled `T5Tokenizer` — confirm.
from transformers import BartTokenizer, RagTokenizer, TaTokenizer


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__="pt" ) -> Dict:
    """Encode one text line to fixed-length tensors (intended: ``encode_line``)."""
    # BART-style tokenizers need add_prefix_space unless the line already starts with one.
    __lowerCamelCase : Any = {'add_prefix_space': True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(' ' ) else {}
    __lowerCamelCase : int = padding_side
    return tokenizer(
        [line] , max_length=lowerCamelCase__ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ) -> List[str]:
    """Drop columns that are padding in every row (intended: ``trim_batch``)."""
    __lowerCamelCase : List[str] = input_ids.ne(lowerCamelCase__ ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class A_ ( SCREAMING_SNAKE_CASE ):
    """Line-aligned seq2seq dataset reading ``{type_path}.source`` /
    ``{type_path}.target`` lazily via ``linecache``."""

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,):
        super().__init__()
        __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source')
        __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target')
        # Per-line character lengths; also serves as the dataset length.
        __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file)
        __lowerCamelCase : List[Any] = max_source_length
        __lowerCamelCase : List[str] = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        __lowerCamelCase : Any = tokenizer
        __lowerCamelCase : Optional[int] = prefix
        if n_obs is not None:
            # Optionally restrict to the first n_obs examples.
            __lowerCamelCase : Dict = self.src_lens[:n_obs]
        __lowerCamelCase : str = src_lang
        __lowerCamelCase : Any = tgt_lang

    def __len__( self : Tuple):
        return len(self.src_lens)

    def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
        """Tokenize one (source, target) line pair into model tensors."""
        __lowerCamelCase : Dict = index + 1  # linecache starts at 1
        __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RagTokenizer bundles two tokenizers: question encoder for sources, generator for targets.
        __lowerCamelCase : Dict = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        )
        __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right')
        __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_target_length ,'right')
        __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int):
        """Return per-line character counts for a file (intended: ``get_char_lens``)."""
        return [len(SCREAMING_SNAKE_CASE__) for x in Path(SCREAMING_SNAKE_CASE__).open().readlines()]

    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
        """Collate a list of examples into a padded, padding-trimmed batch."""
        __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch])
        __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch])
        __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch])
        __lowerCamelCase : Optional[int] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__)
            else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__)
            else self.tokenizer.pad_token_id
        )
        # Trim all-padding columns from targets, then from sources (+ their mask).
        __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch


a =getLogger(__name__)


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    """Flatten a list of lists (intended: ``flatten_list``)."""
    return list(itertools.chain.from_iterable(lowerCamelCase__ ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    """Write repo/commit metadata to ``git_log.json`` in a folder."""
    __lowerCamelCase : str = get_git_info()
    save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , 'git_log.json' ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ) -> List[str]:
    """Dump an object to a JSON file (intended: ``save_json``)."""
    with open(lowerCamelCase__ , 'w' ) as f:
        json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    """Load a JSON file (intended: ``load_json``)."""
    with open(lowerCamelCase__ ) as f:
        return json.load(lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
    """Collect repo id/sha/branch/hostname of the current checkout."""
    __lowerCamelCase : str = git.Repo(search_parent_directories=lowerCamelCase__ )
    __lowerCamelCase : Any = {
        'repo_id': str(lowerCamelCase__ ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List:
    """``list(map(f, xs))`` shorthand (intended: ``lmap``)."""
    return list(map(lowerCamelCase__ , lowerCamelCase__ ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
    """Pickle an object to a path (intended: ``pickle_save``)."""
    with open(lowerCamelCase__ , 'wb' ) as f:
        return pickle.dump(lowerCamelCase__ , lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """SQuAD-style answer normalization: lowercase, strip punctuation/articles,
    collapse whitespace (intended: ``normalize_answer``)."""

    def remove_articles(lowerCamelCase__ ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , lowerCamelCase__ )

    def white_space_fix(lowerCamelCase__ ):
        return " ".join(text.split() )

    def remove_punc(lowerCamelCase__ ):
        __lowerCamelCase : Dict = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(lowerCamelCase__ ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    """Token-level F1 between a prediction and a gold answer (intended: ``f1_score``)."""
    __lowerCamelCase : str = normalize_answer(lowerCamelCase__ ).split()
    __lowerCamelCase : Optional[int] = normalize_answer(lowerCamelCase__ ).split()
    __lowerCamelCase : Union[str, Any] = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ )
    __lowerCamelCase : Any = sum(common.values() )
    if num_same == 0:
        return 0
    __lowerCamelCase : List[Any] = 1.0 * num_same / len(lowerCamelCase__ )
    __lowerCamelCase : int = 1.0 * num_same / len(lowerCamelCase__ )
    __lowerCamelCase : Optional[Any] = (2 * precision * recall) / (precision + recall)
    return fa


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    """Exact match after normalization (intended: ``exact_match_score``)."""
    return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    """Mean exact-match over parallel lists (intended: ``calculate_exact_match``)."""
    assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
    __lowerCamelCase : Dict = 0
    for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ):
        em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ )
    if len(lowerCamelCase__ ) > 0:
        em /= len(lowerCamelCase__ )
    return {"em": em}


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    """True when the model prefix denotes a RAG model (intended: ``is_rag_model``)."""
    return model_prefix.startswith('rag' )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
    """Move hparams entries onto the config, mapping `dropout` -> `dropout_rate`
    for T5-style configs (intended: ``set_extra_model_params``)."""
    __lowerCamelCase : Optional[int] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    __lowerCamelCase : List[str] = 'dropout_rate'
    for p in extra_params:
        if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
            if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase__ ) )
                delattr(lowerCamelCase__ , lowerCamelCase__ )
                continue
            __lowerCamelCase : List[Any] = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p]
            setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
            delattr(lowerCamelCase__ , lowerCamelCase__ )
    return hparams, config
652
0
"""Question-answering Trainer subclass: evaluation/prediction defer metric
computation until after a post-processing step maps raw model outputs to
answer predictions."""
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class lowerCAmelCase__ ( UpperCAmelCase_ ):
    """Trainer with QA-style post-processing hooked into evaluate/predict."""

    def __init__( self : Optional[int] , *a__ : List[Any] , a__ : Tuple=None , a__ : List[str]=None , **a__ : Any ):
        super().__init__(*a__ , **a__ )
        # Raw examples paired with the eval dataset, and the function mapping
        # (examples, features, raw predictions) -> final answer predictions.
        UpperCAmelCase = eval_examples
        UpperCAmelCase = self.post_process_function

    def __snake_case ( self : Any , a__ : Any=None , a__ : Optional[Any]=None , a__ : Dict=None , a__ : str = "eval" ):
        """Evaluate: run the loop without metrics, post-process, then compute metrics."""
        UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCAmelCase = self.get_eval_dataloader(a__ )
        UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCAmelCase = self.compute_metrics
        UpperCAmelCase = None
        UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCAmelCase = time.time()
        try:
            UpperCAmelCase = eval_loop(
                a__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
        finally:
            # Always restore compute_metrics, even if the loop raised.
            UpperCAmelCase = compute_metrics
        UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            # Exclude JIT compile time from throughput accounting.
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            UpperCAmelCase = self.post_process_function(a__ , a__ , output.predictions )
            UpperCAmelCase = self.compute_metrics(a__ )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"{metric_key_prefix}_" ):
                    UpperCAmelCase = metrics.pop(a__ )
            metrics.update(output.metrics )
        else:
            UpperCAmelCase = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(a__ )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , a__ )
        return metrics

    def __snake_case ( self : str , a__ : Optional[int] , a__ : List[Any] , a__ : List[str]=None , a__ : str = "test" ):
        """Predict: same pattern as evaluate, returning a PredictionOutput."""
        UpperCAmelCase = self.get_test_dataloader(a__ )
        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCAmelCase = self.compute_metrics
        UpperCAmelCase = None
        UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCAmelCase = time.time()
        try:
            UpperCAmelCase = eval_loop(
                a__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=a__ , metric_key_prefix=a__ , )
        finally:
            UpperCAmelCase = compute_metrics
        UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                a__ , a__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            # Without a post-processor there is nothing more to compute.
            return output
        UpperCAmelCase = self.post_process_function(a__ , a__ , output.predictions , '''predict''' )
        UpperCAmelCase = self.compute_metrics(a__ )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"{metric_key_prefix}_" ):
                UpperCAmelCase = metrics.pop(a__ )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=a__ )
51
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig a =logging.get_logger(__name__) # General docstring a ="""MobileNetV1Config""" # Base docstring a ="""google/mobilenet_v1_1.0_224""" a =[1, 1024, 7, 7] # Image classification docstring a ="""google/mobilenet_v1_1.0_224""" a ="""tabby, tabby cat""" a =[ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str: __lowerCamelCase : str = {} if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : int = model.mobilenet_va else: __lowerCamelCase : List[str] = model __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/' __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var for i in range(1_3 ): __lowerCamelCase : Any = i + 1 __lowerCamelCase : Union[str, Any] = i * 2 __lowerCamelCase : Optional[Any] = backbone.layer[pt_index] __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" __lowerCamelCase : Tuple = pointer.convolution.weight __lowerCamelCase : Optional[Any] = pointer.normalization.bias __lowerCamelCase : Union[str, Any] = 
pointer.normalization.weight __lowerCamelCase : List[str] = pointer.normalization.running_mean __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var __lowerCamelCase : int = backbone.layer[pt_index + 1] __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" __lowerCamelCase : Optional[Any] = pointer.convolution.weight __lowerCamelCase : Any = pointer.normalization.bias __lowerCamelCase : str = pointer.normalization.weight __lowerCamelCase : Dict = pointer.normalization.running_mean __lowerCamelCase : List[str] = pointer.normalization.running_var if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __lowerCamelCase : Any = model.classifier.weight __lowerCamelCase : int = model.classifier.bias return tf_to_pt_map def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ ) __lowerCamelCase : List[str] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : List[Any] = array # Build TF to PyTorch weights loading map __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue __lowerCamelCase : Optional[int] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __lowerCamelCase : Any = array.squeeze().transpose() else: __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ ) tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ ) tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ ) tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ ) tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> torch.Tensor: __lowerCamelCase , __lowerCamelCase : int = features.shape[-2:] __lowerCamelCase , __lowerCamelCase : List[str] = conv_layer.stride __lowerCamelCase , __lowerCamelCase : str = 
conv_layer.kernel_size if in_height % stride_height == 0: __lowerCamelCase : Optional[int] = max(kernel_height - stride_height , 0 ) else: __lowerCamelCase : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowerCamelCase : List[str] = max(kernel_width - stride_width , 0 ) else: __lowerCamelCase : List[str] = max(kernel_width - (in_width % stride_width) , 0 ) __lowerCamelCase : List[str] = pad_along_width // 2 __lowerCamelCase : Optional[int] = pad_along_width - pad_left __lowerCamelCase : Any = pad_along_height // 2 __lowerCamelCase : List[Any] = pad_along_height - pad_top __lowerCamelCase : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowerCamelCase__ , lowerCamelCase__ , 'constant' , 0.0 ) class A_ ( nn.Module ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[bool] = True ,SCREAMING_SNAKE_CASE__ : Optional[bool or str] = True ,): super().__init__() __lowerCamelCase : Dict = config if in_channels % groups != 0: raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.") __lowerCamelCase : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2) __lowerCamelCase : Optional[int] = nn.Convad( in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ ,padding_mode='zeros' ,) if use_normalization: __lowerCamelCase : Optional[int] = nn.BatchNormad( 
num_features=SCREAMING_SNAKE_CASE__ ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=SCREAMING_SNAKE_CASE__ ,track_running_stats=SCREAMING_SNAKE_CASE__ ,) else: __lowerCamelCase : Dict = None if use_activation: if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : Dict = ACTaFN[use_activation] elif isinstance(config.hidden_act ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : str = ACTaFN[config.hidden_act] else: __lowerCamelCase : List[str] = config.hidden_act else: __lowerCamelCase : List[str] = None def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.Tensor): if self.config.tf_padding: __lowerCamelCase : Any = apply_tf_padding(SCREAMING_SNAKE_CASE__ ,self.convolution) __lowerCamelCase : Optional[int] = self.convolution(SCREAMING_SNAKE_CASE__) if self.normalization is not None: __lowerCamelCase : Dict = self.normalization(SCREAMING_SNAKE_CASE__) if self.activation is not None: __lowerCamelCase : List[str] = self.activation(SCREAMING_SNAKE_CASE__) return features class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[str, Any] = MobileNetVaConfig _UpperCAmelCase : List[str] = load_tf_weights_in_mobilenet_va _UpperCAmelCase : List[str] = '''mobilenet_v1''' _UpperCAmelCase : Any = '''pixel_values''' _UpperCAmelCase : int = False def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[nn.Linear, nn.Convad]): if isinstance(SCREAMING_SNAKE_CASE__ ,(nn.Linear, nn.Convad)): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(SCREAMING_SNAKE_CASE__ ,nn.BatchNormad): module.bias.data.zero_() module.weight.data.fill_(1.0) a =r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. 
Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ a =r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True): super().__init__(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = config __lowerCamelCase : Optional[int] = 3_2 __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth) __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,) __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowerCamelCase : str = nn.ModuleList() for i in range(1_3): __lowerCamelCase : str = out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth) self.layer.append( MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,)) 
self.layer.append( MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,)) __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict): raise NotImplementedError @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,): __lowerCamelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__) if output_hidden_states: __lowerCamelCase : Any = all_hidden_states + (hidden_states,) __lowerCamelCase : Optional[Any] = hidden_states if self.pooler is not None: __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1) else: __lowerCamelCase : List[str] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ 
,hidden_states=SCREAMING_SNAKE_CASE__ ,) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig): super().__init__(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = config.num_labels __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,): __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1] __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : List[str] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCamelCase : Dict = 'regression' elif self.num_labels > 1 and 
(labels.dtype == torch.long or labels.dtype == torch.int): __lowerCamelCase : int = 'single_label_classification' else: __lowerCamelCase : Tuple = 'multi_label_classification' if self.config.problem_type == "regression": __lowerCamelCase : Tuple = MSELoss() if self.num_labels == 1: __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze()) else: __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) elif self.config.problem_type == "single_label_classification": __lowerCamelCase : List[str] = CrossEntropyLoss() __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1)) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase : int = BCEWithLogitsLoss() __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if not return_dict: __lowerCamelCase : List[str] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
652
0
"""Playfair cipher: encrypt/decrypt digraphs using a 5x5 key table.

Reconstructed from a whitespace-collapsed, name-mangled dump: every function
had been renamed to the same identifier (shadowing the earlier ones) and the
digraph loop variables were collapsed into one name, which made the module
unrunnable. Canonical names are restored from the internal call sites
(`chunker`, `prepare_input`, `generate_table`).
"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-length tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Normalize text for Playfair encryption.

    Keeps ASCII letters only (uppercased), separates doubled letters with
    an 'X', and pads the result to an even length.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # Playfair cannot encode a digraph of two identical letters.
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    # Pad so the text splits evenly into digraphs.
    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table for `key` as a flat 25-element list."""
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt `plaintext` with the Playfair cipher under `key`."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt `ciphertext` with the Playfair cipher under `key`."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
52
"""Demonstrate affine rotations of a grayscale image with OpenCV.

Reconstructed from a whitespace-collapsed, name-mangled dump: the helper was
defined as `SCREAMING_SNAKE_CASE__` while the script body called
`get_rotation`, and every module-level variable had been collapsed to `a`
(so references like `image` and `gray_img` were undefined). Canonical names
are restored; the demo body is additionally placed under a `__main__` guard
so importing the module no longer triggers file I/O and plotting.
"""
from pathlib import Path

import cva
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img, pt1, pt2, rows, cols) -> np.ndarray:
    """Warp `img` with the affine transform mapping the 3 points `pt1` onto `pt2`.

    Args:
        img: source image array.
        pt1: 3x2 float32 array of source points.
        pt2: 3x2 float32 array of destination points.
        rows: output width passed to warpAffine's dsize.
        cols: output height passed to warpAffine's dsize.

    Returns:
        The warped image as an ndarray.
    """
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the original point pairings were lost when all variables
    # were collapsed to `a`; the pairs below are a plausible reconstruction —
    # confirm against the upstream demo before relying on the exact output.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
652
0
"""Unit tests for the REALM retriever.

Reconstructed from a whitespace-collapsed, name-mangled dump: every method
had been renamed to `lowercase` (each definition shadowing the previous one),
the base class was the undefined `_UpperCamelCase`, keyword booleans were
replaced by mangled names, and the `mock_hf_hub_download.return_value`
assignment was lost. Canonical method names are restored from the internal
`self.get_*()` call sites; runtime strings are preserved byte-for-byte.
"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


# NOTE(review): class name kept from the dump for compatibility; consider
# renaming to `RealmRetrieverTest` once callers/discovery are confirmed.
class _UpperCAmelCase(TestCase):
    """Tests for RealmRetriever retrieval, answer spans, and (de)serialization."""

    def setUp(self):
        """Create a temp dir holding a tiny WordPiece vocab and a block-records dir."""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        """Load the tokenizer written by setUp()."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        """Config whose block count matches the dummy block records."""
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        # NOTE(review): the dtype argument was name-mangled in the dump;
        # `object` matches the upstream test — confirm.
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        """Retriever concatenates question + retrieved blocks into reader inputs."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        """Answer spans are located (or -1) per retrieved block."""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        """Block records survive a save/from_pretrained round trip, local and mocked-remote."""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            # The dump dropped this assignment, leaving the mock unused;
            # restored so from_pretrained resolves to the local records file.
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
53
"""Compute Proth numbers (N = k * 2^n + 1 with k odd and 2^n > k).

Reconstructed from a name-mangled dump: the function was defined as
`SCREAMING_SNAKE_CASE__` while the demo loop called `proth`, and the
`isinstance` check had both arguments collapsed to the same name; the
canonical name and `isinstance(number, int)` are restored.
"""
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed: proth(1) == 3).

    Args:
        number: 1-based index of the Proth number to return.

    Returns:
        The `number`-th Proth number.

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # After the seed values [3, 5], Proth numbers come in "blocks": each
        # block doubles the power of two being added and contains twice as
        # many members as the previous one.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        # Number of elements to append in the next block.
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
652
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): reconstructed from a name-mangled dump where both functions
# were defined as `a__` (the second shadowed the first, yet the guard called
# `main()` and the body called `get_config_parser()`), and `add_help` /
# `allow_abbrev` were mangled into the function parameter instead of `False`.
import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    """Build the argument parser for the `config` command.

    Args:
        subparsers: optional subparsers object from a parent CLI to attach to.

    Returns:
        The configured `argparse` parser, with `default` and `update`
        subcommands registered.
    """
    # Shared parent parser: no automatic -h (would clash with each
    # subcommand's own help) and no abbreviated option matching.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)

    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    """CLI entry point: parse args and dispatch to the selected subcommand."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # No subcommand selected: show help and exit non-zero.
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
54
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A_ ( unittest.TestCase ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __lowerCamelCase : str = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = min_resolution __lowerCamelCase : Tuple = max_resolution __lowerCamelCase : Dict = do_resize __lowerCamelCase : List[Any] = size __lowerCamelCase : Tuple = do_normalize __lowerCamelCase : Any = image_mean __lowerCamelCase : List[str] = image_std __lowerCamelCase : List[Any] = do_rescale __lowerCamelCase : str = rescale_factor __lowerCamelCase : Tuple = do_pad def lowerCAmelCase ( self : Dict): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, 
"do_pad": self.do_pad, } def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False): if not batched: __lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image): __lowerCamelCase , __lowerCamelCase : Any = image.size else: __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w) __lowerCamelCase : Tuple = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Union[str, Any] = self.size['shortest_edge'] __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h) else: __lowerCamelCase : List[Any] = self.size['shortest_edge'] __lowerCamelCase : Optional[int] = self.size['shortest_edge'] else: __lowerCamelCase : List[str] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0] __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = DetaImageProcessingTester(self) @property def lowerCAmelCase ( self : Any): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Dict): __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ 
,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester 
,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : int): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ 
,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def lowerCAmelCase ( self : Optional[Any]): # prepare image and target __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f: __lowerCamelCase : List[str] = json.loads(f.read()) __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __lowerCamelCase : Optional[int] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] 
,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify orig_size __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__)) @slow def lowerCAmelCase ( self : str): # prepare image, target and masks_path __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f: __lowerCamelCase : Tuple = json.loads(f.read()) __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic') __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : Tuple = torch.Size([6, 4]) 
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : int = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify masks __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__) # verify orig_size __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
652
0
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class UpperCAmelCase : '''simple docstring''' def __init__( self : int ,A : Dict ,A : int=13 ,A : Dict=7 ,A : Dict=False ,A : List[str]=True ,A : Optional[int]=False ,A : Dict=False ,A : Union[str, Any]=19 ,A : str=32 ,A : int=5 ,A : Any=4 ,A : List[str]=37 ,A : Union[str, Any]="gelu" ,A : int=0.1 ,A : int=0.1 ,A : Tuple=5_12 ,A : List[str]=16 ,A : Optional[int]=2 ,A : Optional[int]=0.02 ,A : str=3 ,A : Optional[int]=4 ,A : Any=None ,): __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope def UpperCamelCase_ ( self : int ): __A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __A = ids_tensor([self.batch_size] ,self.num_choices ) __A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self : List[Any] ): __A = EsmConfig( 
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=A ,esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} ,) return config def UpperCamelCase_ ( self : Optional[Any] ,A : List[Any] ,A : List[Any] ,A : Optional[int] ,A : Tuple ,A : List[Any] ,A : List[Any] ): __A = EsmForProteinFolding(config=A ).float() model.to(A ) model.eval() __A = model(A ,attention_mask=A ) __A = model(A ) __A = model(A ) self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) ) def UpperCamelCase_ ( self : Optional[int] ): __A = self.prepare_config_and_inputs() ( ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ( __A ) , ) = config_and_inputs __A = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = False snake_case_ = (EsmForProteinFolding,) if is_torch_available() else () snake_case_ = () snake_case_ = {} if is_torch_available() else {} snake_case_ = False def UpperCamelCase_ ( self : Any ): __A = EsmFoldModelTester(self ) __A = ConfigTester(self ,config_class=A ,hidden_size=37 ) def UpperCamelCase_ ( self : str ): self.config_tester.run_common_tests() def UpperCamelCase_ ( self : Optional[int] ): __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) @unittest.skip("Does not support attention outputs" ) def 
UpperCamelCase_ ( self : Dict ): pass @unittest.skip def UpperCamelCase_ ( self : Any ): pass @unittest.skip("Esm does not support embedding resizing" ) def UpperCamelCase_ ( self : Any ): pass @unittest.skip("Esm does not support embedding resizing" ) def UpperCamelCase_ ( self : Optional[int] ): pass @unittest.skip("ESMFold does not support passing input embeds!" ) def UpperCamelCase_ ( self : Optional[int] ): pass @unittest.skip("ESMFold does not support head pruning." ) def UpperCamelCase_ ( self : List[str] ): pass @unittest.skip("ESMFold does not support head pruning." ) def UpperCamelCase_ ( self : List[Any] ): pass @unittest.skip("ESMFold does not support head pruning." ) def UpperCamelCase_ ( self : int ): pass @unittest.skip("ESMFold does not support head pruning." ) def UpperCamelCase_ ( self : List[Any] ): pass @unittest.skip("ESMFold does not support head pruning." ) def UpperCamelCase_ ( self : List[Any] ): pass @unittest.skip("ESMFold does not output hidden states in the normal way." ) def UpperCamelCase_ ( self : Optional[Any] ): pass @unittest.skip("ESMfold does not output hidden states in the normal way." ) def UpperCamelCase_ ( self : Dict ): pass @unittest.skip("ESMFold only has one output format." ) def UpperCamelCase_ ( self : int ): pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" ) def UpperCamelCase_ ( self : List[str] ): pass @unittest.skip("ESMFold does not support input chunking." ) def UpperCamelCase_ ( self : List[Any] ): pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." ) def UpperCamelCase_ ( self : List[str] ): pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def UpperCamelCase_ ( self : int ): pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def UpperCamelCase_ ( self : int ): pass @unittest.skip("ESMFold doesn't support torchscript compilation." 
) def UpperCamelCase_ ( self : int ): pass @unittest.skip("ESMFold doesn't support data parallel." ) def UpperCamelCase_ ( self : Optional[int] ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def UpperCamelCase_ ( self : Tuple ): pass @require_torch class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @slow def UpperCamelCase_ ( self : str ): __A = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float() model.eval() __A = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) __A = model(A )["positions"] __A = torch.tensor([2.58_28, 0.79_93, -10.93_34] ,dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,A ,atol=1E-4 ) )
55
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : List[Any] = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCAmelCase : Tuple = False @property def lowerCAmelCase ( self : Tuple): return 3_2 @property def lowerCAmelCase ( self : List[Any]): return 3_2 @property def lowerCAmelCase ( self : str): return self.time_input_dim @property def lowerCAmelCase ( self : List[str]): return self.time_input_dim * 4 @property def lowerCAmelCase ( self : List[str]): return 1_0_0 @property def lowerCAmelCase ( self : Dict): torch.manual_seed(0) __lowerCamelCase : Optional[Any] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': 
self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__) return model @property def lowerCAmelCase ( self : Union[str, Any]): return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[Any]): torch.manual_seed(0) __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs) return model def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Tuple = self.dummy_unet __lowerCamelCase : List[Any] = self.dummy_movq __lowerCamelCase : str = DDIMScheduler( num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Dict = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0): __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to( SCREAMING_SNAKE_CASE__) # create hint __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) 
,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) if str(SCREAMING_SNAKE_CASE__).startswith('mps'): __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 6_4, 'width': 6_4, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Dict = 'cpu' __lowerCamelCase : Tuple = self.get_dummy_components() __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = output.images __lowerCamelCase : Tuple = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0] __lowerCamelCase : Dict = image[0, -3:, -3:, -1] __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCamelCase : List[str] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : int): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : int): __lowerCamelCase : List[Any] = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy') __lowerCamelCase : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png') __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0 __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0) __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa) pipe_prior.to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa) __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = 'A robot, 4k photo' __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior( SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase : Any = pipeline( image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,) __lowerCamelCase : List[Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
652
0
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil _a : Optional[Any] = 100 _a : Dict = set(range(3, NUM_PRIMES, 2)) primes.add(2) _a : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_0_0 ) def _a (lowercase__ : int ) -> set[int]: """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} __snake_case = set() __snake_case = 42 __snake_case = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def _a (lowercase__ : int = 5_0_0_0 ) -> int | None: """simple docstring""" for number_to_partition in range(1 , lowercase__ ): if len(partition(lowercase__ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f'''{solution() = }''')
56
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A_ : _UpperCAmelCase : int = XGLMConfig _UpperCAmelCase : List[Any] = {} _UpperCAmelCase : Tuple = '''gelu''' def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,): __lowerCamelCase : List[str] = parent __lowerCamelCase : List[str] = batch_size __lowerCamelCase : str = seq_length __lowerCamelCase : Optional[Any] = is_training __lowerCamelCase : Any = use_input_mask __lowerCamelCase : str = use_labels __lowerCamelCase : Any = vocab_size __lowerCamelCase : Dict = d_model __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : List[str] = ffn_dim __lowerCamelCase : Optional[Any] = activation_function __lowerCamelCase : Tuple = activation_dropout __lowerCamelCase : Union[str, Any] = attention_dropout __lowerCamelCase : List[str] = max_position_embeddings 
__lowerCamelCase : List[Any] = initializer_range __lowerCamelCase : Any = None __lowerCamelCase : List[str] = 0 __lowerCamelCase : List[str] = 2 __lowerCamelCase : Dict = 1 def lowerCAmelCase ( self : Any): return XGLMConfig.from_pretrained('facebook/xglm-564M') def lowerCAmelCase ( self : str): __lowerCamelCase : Any = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3) __lowerCamelCase : Dict = None if self.use_input_mask: __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) __lowerCamelCase : int = self.get_config() __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2) return ( config, input_ids, input_mask, head_mask, ) def lowerCAmelCase ( self : List[Any]): return XGLMConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,) def lowerCAmelCase ( self : int): __lowerCamelCase : int = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Any = config_and_inputs __lowerCamelCase : str = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () _UpperCAmelCase : str = ( 
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) _UpperCAmelCase : Tuple = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Union[str, Any] = False def lowerCAmelCase ( self : Tuple): __lowerCamelCase : Tuple = TFXGLMModelTester(self) __lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7) def lowerCAmelCase ( self : List[Any]): self.config_tester.run_common_tests() @slow def lowerCAmelCase ( self : str): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.') def lowerCAmelCase ( self : Union[str, Any]): super().test_resize_token_embeddings() @require_tf class A_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True): __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__) @slow def lowerCAmelCase ( self : List[str]): __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M') __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') tf.random.set_seed(0) __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf') __lowerCamelCase : List[Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0'): __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0]) __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) @slow def lowerCAmelCase ( self : Dict): __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M') __lowerCamelCase : Union[str, Any] = 'left' # use different length sentences to test batching __lowerCamelCase : List[str] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. 
The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2) __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2) __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2) __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
652
0
# Lazy-module init for the LiLT model package: heavyweight submodules are only
# imported when one of their public names is first accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> list of public names it provides.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    # Modeling code requires torch; without it we simply don't register it.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static-analysis path: real imports so type checkers see the symbols.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
57
# Kandinsky (v1) pipeline package init: gates pipeline imports on the optional
# `transformers` + `torch` dependencies.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Probe for the optional heavy dependencies; raising routes us to the
    # dummy-object fallback below.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # NOTE(review): the fallback only provides KandinskyPipeline and
    # KandinskyPriorPipeline; KandinskyImgaImgPipeline / KandinskyInpaintPipeline
    # have no dummy here, so importing them without torch+transformers raises
    # ImportError rather than a helpful dependency message — confirm intended.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Full dependency set available: expose the real pipelines and text encoder.
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
652
0
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = ['''image_processor''', '''tokenizer'''] _lowerCamelCase = '''AutoImageProcessor''' _lowerCamelCase = '''AutoTokenizer''' def __init__( self , _lowercase , _lowercase ) -> Optional[Any]: '''simple docstring''' super().__init__(_lowercase , _lowercase ) snake_case_ : List[Any] = self.image_processor def __call__( self , _lowercase=None , _lowercase=None , _lowercase=None , **_lowercase ) -> Tuple: '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: snake_case_ : List[str] = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase ) if images is not None: snake_case_ : Optional[int] = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase ) if text is not None and images is not None: snake_case_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase ) def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*_lowercase , **_lowercase ) def UpperCAmelCase__ ( self , *_lowercase , **_lowercase ) -> int: '''simple docstring''' return self.tokenizer.decode(*_lowercase , **_lowercase ) @property def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
58
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv2D -> BatchNorm -> activation, with explicit 'same'-style zero padding."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem: validates channel count and embeds pixel values with a strided conv."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + BN used to project the residual when shape/stride changes."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excite: global pool -> bottleneck convs -> channel-wise gate."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet 'X' residual block: 1x1 -> grouped 3x3 -> 1x1, plus shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet 'Y' residual block: 'X' block with a squeeze-and-excite stage."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A stage of `depth` blocks; the first block downsamples via `stride`."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    """Sequence of stages; optionally collects per-stage hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Full backbone: embeddings -> encoder -> global pooling (NCHW in/out)."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
652
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): both module globals were collapsed onto `__A` in the source (the
# logger was immediately shadowed by the map); restored to distinct names.
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for a SEW-D speech model.

    NOTE(review): the source collapsed every ``__init__`` parameter onto one name
    (invalid Python — duplicate argument) and every attribute assignment onto
    ``lowerCamelCase__``. Parameter/attribute names below are restored by matching
    the default values against the assignment order; confirm against the upstream
    SEW-D configuration.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # one feature-extractor layer per conv dimension
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the feature extractor = product of all conv strides.
        return functools.reduce(operator.mul, self.conv_stride, 1)
59
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
# NOTE(review): the source collapsed both sentinel globals onto `a`; restored.
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in `qs` match any window of strings in `ks`."""
    # compile regexes and force complete match with a trailing `$`
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    """Build a `replace(key, val)` closure that applies the first matching rule."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """Partition rules mapping parameter-name patterns to PartitionSpecs.

    NOTE(review): the second positional argument of the two-axis `P(...)` specs was
    obfuscated in the source; `None` (replicated axis) is the standard value — confirm.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of `in_dict`; fail if any key is unmatched."""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
652
0
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    """Sort the mapping entries of one auto-module file alphabetically by identifier.

    Returns True when the file is out of order and `overwrite` is False; rewrites
    the file in place (returning None) when `overwrite` is True.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # mapping entries sit 8 spaces past the introduction's indentation
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    """Run `sort_auto_mapping` over every .py file of the auto module."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
60
import math


def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes: return all primes strictly below `n` (n >= 3)."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Cross out multiples of each odd i starting at 2*i; even indices are never
    # consulted by the collection loop below, so over-marking them is harmless.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum of semidivisible numbers not exceeding `limit`.

    For each pair of consecutive primes (p, q) with p**2 <= limit, the numbers in
    (p**2, q**2) divisible by exactly one of p (lps) or q (ups) are summed.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound so the downward walk starts at or below `limit`
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
652
0
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    """Build a DPTConfig and the expected output shape for a checkpoint URL.

    NOTE(review): the config attribute names below are reconstructed — the source
    collapsed every assignment target onto one identifier. Confirm against the
    DPTConfig API before relying on them.
    """
    config = DPTConfig(embedding_type="hybrid")
    expected_shape = (1, 384, 384)  # default; overridden by the branches below

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # BUG FIX: the source read `if "nyu" or "midas" in checkpoint_url:` — the
    # non-empty literal "nyu" is always truthy, so the branch ran for every URL.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    """Drop the original classification head weights (not converted)."""
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    """Map one original DPT-hybrid state-dict key to the HF naming scheme."""
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value tensors.

    NOTE(review): the destination key names are reconstructed from the standard HF
    ViT layout — confirm against DPT's modeling code.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    """Download the standard COCO test image used for sanity-checking conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Convert an original DPT-hybrid checkpoint to the HF format and sanity-check it."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
61
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of a variance-exploding SDE scheduler predictor step.

    NOTE(review): name restored — both classes in this chunk were obfuscated to
    ``A_`` (the second shadowed the first), yet ``step_pred`` below instantiates
    ``SdeVeOutput`` by name.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class A_(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) score-based SDE scheduler (predictor + corrector).

    NOTE(review): locals/attributes/method names were collapsed by obfuscation;
    restored from the internal call sites (``self.set_sigmas``, ``self.set_timesteps``,
    ``self.get_adjacent_sigma``, ``self.discrete_sigmas`` are all referenced below).
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # VE schedulers do not rescale the model input.
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Create the (decreasing) continuous timestep schedule from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Precompute the per-timestep noise scales (both continuous and discrete)."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero at the very first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: one reverse-SDE Euler step from `sample` given the score."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: Langevin-style update with a step size derived from the SNR."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse `original_samples` to the noise level of `timesteps`."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
652
0
from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig snake_case = [ """openmmlab/upernet-convnext-tiny""", # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring snake_case = """UperNetConfig""" class SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[int, Tuple[int, int]] , UpperCAmelCase_ : Union[int, Tuple[int, int], str] = 0 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Union[int, Tuple[int, int]] = 1 , ): super().__init__() SCREAMING_SNAKE_CASE : str = nn.Convad( in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , padding=UpperCAmelCase_ , bias=UpperCAmelCase_ , dilation=UpperCAmelCase_ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.BatchNormad(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = nn.ReLU() def _A ( self : Union[str, Any] , UpperCAmelCase_ : torch.Tensor ): SCREAMING_SNAKE_CASE : Any = self.conv(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.batch_norm(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.activation(UpperCAmelCase_ ) return output class SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): super().__init__() SCREAMING_SNAKE_CASE : str = [ nn.AdaptiveAvgPoolad(UpperCAmelCase_ ), UperNetConvModule(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): 
self.add_module(str(UpperCAmelCase_ ) , UpperCAmelCase_ ) def _A ( self : Any , UpperCAmelCase_ : torch.Tensor ): SCREAMING_SNAKE_CASE : Optional[int] = input for layer in self.layers: SCREAMING_SNAKE_CASE : Optional[Any] = layer(UpperCAmelCase_ ) return hidden_state class SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : Dict , UpperCAmelCase_ : Tuple[int, ...] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : bool ): super().__init__() SCREAMING_SNAKE_CASE : Dict = pool_scales SCREAMING_SNAKE_CASE : Optional[int] = align_corners SCREAMING_SNAKE_CASE : Union[str, Any] = in_channels SCREAMING_SNAKE_CASE : List[str] = channels SCREAMING_SNAKE_CASE : str = [] for i, pool_scale in enumerate(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Optional[int] = UperNetPyramidPoolingBlock(pool_scale=UpperCAmelCase_ , in_channels=UpperCAmelCase_ , channels=UpperCAmelCase_ ) self.blocks.append(UpperCAmelCase_ ) self.add_module(str(UpperCAmelCase_ ) , UpperCAmelCase_ ) def _A ( self : Tuple , UpperCAmelCase_ : torch.Tensor ): SCREAMING_SNAKE_CASE : List[Any] = [] for ppm in self.blocks: SCREAMING_SNAKE_CASE : Dict = ppm(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = nn.functional.interpolate( UpperCAmelCase_ , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners ) ppm_outs.append(UpperCAmelCase_ ) return ppm_outs class SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ): super().__init__() SCREAMING_SNAKE_CASE : Tuple = config SCREAMING_SNAKE_CASE : List[str] = config.pool_scales # e.g. 
(1, 2, 3, 6) SCREAMING_SNAKE_CASE : Dict = in_channels SCREAMING_SNAKE_CASE : str = config.hidden_size SCREAMING_SNAKE_CASE : str = False SCREAMING_SNAKE_CASE : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module SCREAMING_SNAKE_CASE : Tuple = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) SCREAMING_SNAKE_CASE : Tuple = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module SCREAMING_SNAKE_CASE : Union[str, Any] = nn.ModuleList() SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer SCREAMING_SNAKE_CASE : Optional[Any] = UperNetConvModule(UpperCAmelCase_ , self.channels , kernel_size=1 ) SCREAMING_SNAKE_CASE : Dict = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(UpperCAmelCase_ ) self.fpn_convs.append(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def _A ( self : Optional[int] ): self.apply(self._init_weights ) def _A ( self : Tuple , UpperCAmelCase_ : Union[str, Any] ): if isinstance(UpperCAmelCase_ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _A ( self : Tuple , UpperCAmelCase_ : str ): SCREAMING_SNAKE_CASE : Any = inputs[-1] SCREAMING_SNAKE_CASE : Union[str, Any] = [x] psp_outs.extend(self.psp_modules(UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat(UpperCAmelCase_ , dim=1 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.bottleneck(UpperCAmelCase_ ) return output def _A ( self : Dict , UpperCAmelCase_ : torch.Tensor ): # build laterals SCREAMING_SNAKE_CASE : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, 
lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(UpperCAmelCase_ ) ) # build top-down path SCREAMING_SNAKE_CASE : Union[str, Any] = len(UpperCAmelCase_ ) for i in range(used_backbone_levels - 1 , 0 , -1 ): SCREAMING_SNAKE_CASE : Optional[int] = laterals[i - 1].shape[2:] SCREAMING_SNAKE_CASE : List[str] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=UpperCAmelCase_ , mode="bilinear" , align_corners=self.align_corners ) # build outputs SCREAMING_SNAKE_CASE : Any = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): SCREAMING_SNAKE_CASE : List[Any] = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(UpperCAmelCase_ , dim=1 ) SCREAMING_SNAKE_CASE : Tuple = self.fpn_bottleneck(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.classifier(UpperCAmelCase_ ) return output class SCREAMING_SNAKE_CASE ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : Union[int, Tuple[int, int]] = 1 ): super().__init__() SCREAMING_SNAKE_CASE : int = config SCREAMING_SNAKE_CASE : Optional[Any] = config.auxiliary_in_channels SCREAMING_SNAKE_CASE : List[str] = config.auxiliary_channels SCREAMING_SNAKE_CASE : List[Any] = config.auxiliary_num_convs SCREAMING_SNAKE_CASE : Tuple = config.auxiliary_concat_input SCREAMING_SNAKE_CASE : Optional[int] = in_index SCREAMING_SNAKE_CASE : List[Any] = (kernel_size // 2) * dilation SCREAMING_SNAKE_CASE : int = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=UpperCAmelCase_ , padding=UpperCAmelCase_ , dilation=UpperCAmelCase_ ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( 
self.channels , self.channels , kernel_size=UpperCAmelCase_ , padding=UpperCAmelCase_ , dilation=UpperCAmelCase_ ) ) if self.num_convs == 0: SCREAMING_SNAKE_CASE : Optional[int] = nn.Identity() else: SCREAMING_SNAKE_CASE : List[str] = nn.Sequential(*UpperCAmelCase_ ) if self.concat_input: SCREAMING_SNAKE_CASE : Dict = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=UpperCAmelCase_ , padding=kernel_size // 2 ) SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def _A ( self : Optional[int] ): self.apply(self._init_weights ) def _A ( self : int , UpperCAmelCase_ : Union[str, Any] ): if isinstance(UpperCAmelCase_ , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def _A ( self : Tuple , UpperCAmelCase_ : torch.Tensor ): # just take the relevant feature maps SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_hidden_states[self.in_index] SCREAMING_SNAKE_CASE : Any = self.convs(UpperCAmelCase_ ) if self.concat_input: SCREAMING_SNAKE_CASE : str = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) SCREAMING_SNAKE_CASE : int = self.classifier(UpperCAmelCase_ ) return output class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Tuple = UperNetConfig UpperCamelCase_ : List[str] = '''pixel_values''' UpperCamelCase_ : List[str] = True def _A ( self : Tuple , UpperCAmelCase_ : Tuple ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def _A ( self : Tuple ): self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def _A ( self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=False ): if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = value snake_case = r""" Parameters: This 
model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. config ([`UperNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ snake_case = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.''' , lowerCAmelCase , ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , UpperCAmelCase_ : Any ): super().__init__(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetHead(UpperCAmelCase_ , in_channels=self.backbone.channels ) SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetFCNHead(UpperCAmelCase_ ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) ) @replace_return_docstrings(output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , ): SCREAMING_SNAKE_CASE : int = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions SCREAMING_SNAKE_CASE : Tuple = self.backbone.forward_with_filtered_kwargs( UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , output_attentions=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = outputs.feature_maps SCREAMING_SNAKE_CASE : Optional[Any] = self.decode_head(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : str = nn.functional.interpolate(UpperCAmelCase_ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = None if self.auxiliary_head is not None: SCREAMING_SNAKE_CASE : Dict = 
self.auxiliary_head(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = nn.functional.interpolate( UpperCAmelCase_ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = None if labels is not None: if self.config.num_labels == 1: raise ValueError("The number of labels should be greater than one" ) else: # compute weighted loss SCREAMING_SNAKE_CASE : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) SCREAMING_SNAKE_CASE : Dict = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[1:] else: SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
62
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> np.ndarray: return input_array.reshape((input_array.size, 1) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : str = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : int = features[:, labels == i] __lowerCamelCase : Optional[int] = data.mean(1 ) # Centralize the data of class i __lowerCamelCase : int = data - column_reshape(lowerCamelCase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowerCamelCase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) __lowerCamelCase : Union[str, Any] = np.dot(lowerCamelCase__ , centered_data.T ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = features.mean(1 ) __lowerCamelCase : Union[str, Any] = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = features[:, labels == i] __lowerCamelCase : Union[str, Any] = data.shape[1] __lowerCamelCase : Union[str, Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) __lowerCamelCase : List[str] = device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: # Check if the features have been loaded if features.any(): __lowerCamelCase : Tuple = features.mean(1 ) # Center the dataset __lowerCamelCase : Any = features - np.reshape(lowerCamelCase__ , (data_mean.size, 1) ) __lowerCamelCase : Optional[int] = np.dot(lowerCamelCase__ , centered_data.T ) / features.shape[1] __lowerCamelCase , __lowerCamelCase : List[Any] = np.linalg.eigh(lowerCamelCase__ ) # Take all the columns in the reverse order (-1), and then takes only the first __lowerCamelCase : Dict = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space __lowerCamelCase : int = np.dot(filtered_eigenvectors.T , lowerCamelCase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: assert classes > dimensions # Check if features have been already loaded if features.any: __lowerCamelCase , __lowerCamelCase : Dict = eigh( covariance_between_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , covariance_within_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , ) __lowerCamelCase : Union[str, Any] = eigenvectors[:, ::-1][:, :dimensions] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = np.linalg.svd(lowerCamelCase__ ) __lowerCamelCase : int = svd_matrix[:, 0:dimensions] __lowerCamelCase : Optional[int] = np.dot(filtered_svd_matrix.T , lowerCamelCase__ ) logging.info('Linear 
Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: # Create dummy dataset with 2 classes and 3 features __lowerCamelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) __lowerCamelCase : Optional[int] = np.array([0, 0, 0, 1, 1] ) __lowerCamelCase : Optional[Any] = 2 __lowerCamelCase : Tuple = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : int = linear_discriminant_analysis( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if isinstance(lowerCamelCase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) __lowerCamelCase : Dict = 2 __lowerCamelCase : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : Optional[Any] = principal_component_analysis(lowerCamelCase__ , lowerCamelCase__ ) if not np.allclose(lowerCamelCase__ , lowerCamelCase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
652
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a : Dict = logging.get_logger(__name__) a : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} a : Tuple = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } a : Any = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } a : Dict = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( 
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } a : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } a : int = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } a : Any = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } a : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } a : List[str] = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } a : List[str] = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class a ( lowercase__ ): """simple docstring""" a : Dict = VOCAB_FILES_NAMES a : Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION a : str = DPRContextEncoderTokenizer class a ( lowercase__ ): """simple docstring""" a : Union[str, Any] = VOCAB_FILES_NAMES a : List[Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a : List[str] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Dict = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a : List[Any] = DPRQuestionEncoderTokenizer a : Optional[int] = collections.namedtuple( "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"] ) a : Dict = collections.namedtuple("DPRReaderOutput", 
["start_logits", "end_logits", "relevance_logits"]) a : Tuple = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. 
Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n " @add_start_docstrings(lowercase__ ) class a : """simple docstring""" def __call__( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Union[bool, str] = False , __lowercase : Union[bool, str] = False , __lowercase : Optional[int] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Optional[bool] = None , **__lowercase : List[Any] , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( __lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , return_tensors=__lowercase , return_attention_mask=__lowercase , **__lowercase , ) elif titles is None or texts is None: __UpperCAmelCase : Optional[int] = titles if texts is None else texts return super().__call__( __lowercase , __lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , return_tensors=__lowercase , return_attention_mask=__lowercase , **__lowercase , ) __UpperCAmelCase : Optional[int] = titles if not isinstance(__lowercase , __lowercase ) else [titles] __UpperCAmelCase : Dict = texts if not isinstance(__lowercase , __lowercase ) else [texts] __UpperCAmelCase : Union[str, Any] = len(__lowercase ) __UpperCAmelCase : Dict = questions if not isinstance(__lowercase , __lowercase ) else [questions] * n_passages assert len(__lowercase ) == len( __lowercase ), f"""There should be as many titles than texts but got {len(__lowercase )} titles and {len(__lowercase )} texts.""" __UpperCAmelCase : 
Union[str, Any] = super().__call__(__lowercase , __lowercase , padding=__lowercase , truncation=__lowercase )["""input_ids"""] __UpperCAmelCase : Any = super().__call__(__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase )["""input_ids"""] __UpperCAmelCase : Union[str, Any] = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(__lowercase , __lowercase ) ] } if return_attention_mask is not False: __UpperCAmelCase : Tuple = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __UpperCAmelCase : int = attention_mask return self.pad(__lowercase , padding=__lowercase , max_length=__lowercase , return_tensors=__lowercase ) def UpperCAmelCase ( self : Union[str, Any] , __lowercase : BatchEncoding , __lowercase : DPRReaderOutput , __lowercase : int = 16 , __lowercase : int = 64 , __lowercase : int = 4 , ) -> List[DPRSpanPrediction]: __UpperCAmelCase : List[Any] = reader_input["""input_ids"""] __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = reader_output[:3] __UpperCAmelCase : Any = len(__lowercase ) __UpperCAmelCase : Union[str, Any] = sorted(range(__lowercase ) , reverse=__lowercase , key=relevance_logits.__getitem__ ) __UpperCAmelCase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __UpperCAmelCase : List[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __UpperCAmelCase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __UpperCAmelCase : Dict = sequence_ids.index(self.pad_token_id ) else: __UpperCAmelCase : Any = len(__lowercase ) __UpperCAmelCase : str = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , 
end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowercase , top_spans=__lowercase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowercase , start_index=__lowercase , end_index=__lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(__lowercase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def UpperCAmelCase ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : List[int] , __lowercase : int , __lowercase : int , ) -> List[DPRSpanPrediction]: __UpperCAmelCase : Union[str, Any] = [] for start_index, start_score in enumerate(__lowercase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __UpperCAmelCase : Tuple = sorted(__lowercase , key=lambda __lowercase : x[1] , reverse=__lowercase ) __UpperCAmelCase : List[Any] = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" __UpperCAmelCase : Optional[Any] = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(__lowercase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowercase__ ) class a ( lowercase__ , lowercase__ ): """simple docstring""" a : Optional[int] = VOCAB_FILES_NAMES a : Optional[Any] = 
READER_PRETRAINED_VOCAB_FILES_MAP a : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : List[Any] = READER_PRETRAINED_INIT_CONFIGURATION a : List[Any] = ['input_ids', 'attention_mask'] a : Optional[Any] = DPRReaderTokenizer
63
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


# Module-level logger used by analyze_directory; the original bound it to `a`
# while the code below referenced `logger` (NameError) — restored.
logger = logging.getLogger()


@unittest.skip('''Temporarily disable the doc tests.''')
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    """Runs the doctests embedded in transformers source and documentation files.

    Note: the original gave every method the same name ``lowerCAmelCase`` so
    only the last definition survived; distinct names are restored here.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in ``directory`` matching the filters.

        Args:
            directory: directory to scan (non-recursive).
            identifier: keep only file names containing this substring.
            n_identifier: drop file names containing this substring (or any of
                a list of substrings).
            ignore_files: extra file names to skip ('__init__.py' always is).
            only_modules: if True, import each file as a transformers attribute
                and run its DocTestSuite; otherwise run doctest.testfile on it.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    # NOTE(review): the original called getattr on two copies of
                    # the same mangled name; restored to look the module up on
                    # the transformers package — confirm against upstream.
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
652
0
from __future__ import annotations from collections.abc import Callable lowercase_ : int = list[list[float | int]] def A__ ( snake_case_ : Matrix , snake_case_ : Matrix ): SCREAMING_SNAKE_CASE__: int= len(snake_case_ ) SCREAMING_SNAKE_CASE__: Matrix= [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )] SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: float for row in range(snake_case_ ): for col in range(snake_case_ ): SCREAMING_SNAKE_CASE__: Union[str, Any]= matrix[row][col] SCREAMING_SNAKE_CASE__: int= vector[row][0] SCREAMING_SNAKE_CASE__: int= 0 SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__: int= max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_ , snake_case_ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= augmented[pivot_row], augmented[row] for rowa in range(row + 1 , snake_case_ ): SCREAMING_SNAKE_CASE__: Union[str, Any]= augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__: Optional[int]= 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , snake_case_ ): for row in range(snake_case_ ): SCREAMING_SNAKE_CASE__: List[Any]= augmented[row][col] / augmented[col][col] for cola in range(snake_case_ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(snake_case_ ) ] def A__ ( snake_case_ : list[int] ): SCREAMING_SNAKE_CASE__: int= len(snake_case_ ) SCREAMING_SNAKE_CASE__: Matrix= [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )] SCREAMING_SNAKE_CASE__: Matrix= [[0] for _ in range(snake_case_ )] 
SCREAMING_SNAKE_CASE__: Matrix SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: int SCREAMING_SNAKE_CASE__: int for x_val, y_val in enumerate(snake_case_ ): for col in range(snake_case_ ): SCREAMING_SNAKE_CASE__: Union[str, Any]= (x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__: List[Any]= y_val SCREAMING_SNAKE_CASE__: int= solve(snake_case_ , snake_case_ ) def interpolated_func(snake_case_ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(snake_case_ ) ) return interpolated_func def A__ ( snake_case_ : int ): return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def A__ ( snake_case_ : Callable[[int], int] = question_function , snake_case_ : int = 10 ): SCREAMING_SNAKE_CASE__: list[int]= [func(snake_case_ ) for x_val in range(1 , order + 1 )] SCREAMING_SNAKE_CASE__: list[Callable[[int], int]]= [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] SCREAMING_SNAKE_CASE__: int= 0 SCREAMING_SNAKE_CASE__: Callable[[int], int] SCREAMING_SNAKE_CASE__: int for poly in polynomials: SCREAMING_SNAKE_CASE__: Optional[Any]= 1 while func(snake_case_ ) == poly(snake_case_ ): x_val += 1 ret += poly(snake_case_ ) return ret if __name__ == "__main__": print(f'''{solution() = }''')
64
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# SentencePiece meta symbol marking the start of a word.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1024}


class A_(PreTrainedTokenizer):
    """BARTpho (syllable) tokenizer backed by SentencePiece, with a reduced
    monolingual vocabulary file mapping pieces to fairseq-style ids.

    The original bound every module constant to ``a``, subclassed the
    undefined name ``SCREAMING_SNAKE_CASE`` and gave every method the same
    name ``lowerCAmelCase``; canonical names are restored throughout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor is not picklable; carry its serialized proto.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Single sequence: ``<s> A </s>``; pair: ``<s> A </s></s> B </s>``."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(
        self,
        token_ids_a: List[int],
        token_ids_a_pair: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a,
                token_ids_1=token_ids_a_pair,
                already_has_special_tokens=True,
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """BARTpho does not use token types: everything maps to segment 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    @property
    def vocab_size(self):
        # Size of the reduced (fairseq) vocabulary, not the raw SP model.
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a piece to its id in the reduced vocab, else the unk id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn SentencePiece word markers back into spaces.

        Bug fix: the original replaced the joined string with itself
        (``.replace(tokens, ' ')``) instead of replacing SPIECE_UNDERLINE.
        """
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SP model and the monolingual vocab into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
652
0
"""simple docstring"""
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller


# Smallest admissible value for random group elements / private exponents.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a (probable) primitive root modulo ``p_val``.

    Candidates are drawn at random and rejected when pow(g, 2, p) == 1 or
    pow(g, p, p) == 1, per the original screening.
    """
    print("""Generating primitive root of p""")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns:
        (public_key, private_key) where public_key = (key_size, e_1, e_2, p)
        and private_key = (key_size, d).
    """
    print("""Generating prime p...""")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a2 = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (key_size, e_a, e_a2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if present."""
    if os.path.exists(F"{name}_pubkey.txt") or os.path.exists(F"{name}_privkey.txt"):
        print("""\nWARNING:""")
        print(
            F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            """Use a different name or delete these files and re-run this program."""
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F"\nWriting public key to file {name}_pubkey.txt...")
    with open(F"{name}_pubkey.txt", """w""") as fo:
        fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(F"Writing private key to file {name}_privkey.txt...")
    with open(F"{name}_privkey.txt", """w""") as fo:
        fo.write(F"{private_key[0]},{private_key[1]}")


def main() -> None:
    """Generate a 2048-bit ElGamal key pair named 'elgamal'."""
    print("""Making key files...""")
    make_key_files("""elgamal""", 2048)
    print("""Key files generation successful""")


if __name__ == "__main__":
    main()
65
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=1_3 ,SCREAMING_SNAKE_CASE__ : int=3_0 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : List[str]=3_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,): __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Optional[Any] = batch_size __lowerCamelCase : Dict = image_size __lowerCamelCase : Optional[Any] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : str = is_training __lowerCamelCase : List[Any] = use_labels __lowerCamelCase : Any = hidden_size __lowerCamelCase : Optional[int] = 
num_hidden_layers __lowerCamelCase : Any = num_attention_heads __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : Dict = hidden_act __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : List[Any] = attention_probs_dropout_prob __lowerCamelCase : Dict = type_sequence_label_size __lowerCamelCase : Optional[Any] = initializer_range __lowerCamelCase : List[str] = scope __lowerCamelCase : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : str = num_patches + 2 def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __lowerCamelCase : List[Any] = None if self.use_labels: __lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : List[Any]): return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict): __lowerCamelCase : Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size)) def 
lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any): __lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __lowerCamelCase : int = 1 __lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size)) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): __lowerCamelCase : Dict = self.type_sequence_label_size __lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) # test greyscale images __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Tuple = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf 
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _UpperCAmelCase : List[Any] = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[int] = False def lowerCAmelCase ( self : Any): __lowerCamelCase : str = TFDeiTModelTester(self) __lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7) def lowerCAmelCase ( self : str): self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Dict): __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer)) __lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense)) def lowerCAmelCase ( self : List[Any]): __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Any = [*signature.parameters.keys()] __lowerCamelCase : Union[str, Any] = 
['pixel_values'] self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str=False): __lowerCamelCase : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase ( self : Optional[int]): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self : List[Any]): return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') __lowerCamelCase : int = self.default_image_processor __lowerCamelCase : Tuple = prepare_img() 
__lowerCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf') # forward pass __lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__) # verify the logits __lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
652
0
from __future__ import annotations import requests def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict: _lowercase : Tuple = F"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(SCREAMING_SNAKE_CASE ).json() def __magic_name__ ( SCREAMING_SNAKE_CASE = 10 ) -> list[dict]: _lowercase : List[str] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' _lowercase : str = requests.get(SCREAMING_SNAKE_CASE ).json()[:max_stories] return [get_hackernews_story(SCREAMING_SNAKE_CASE ) for story_id in story_ids] def __magic_name__ ( SCREAMING_SNAKE_CASE = 10 ) -> str: _lowercase : List[Any] = hackernews_top_stories(SCREAMING_SNAKE_CASE ) return "\n".join('* [{title}]({url})'.format(**SCREAMING_SNAKE_CASE ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
66
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] _UpperCAmelCase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
652
0
import importlib.metadata from typing import Union from packaging.version import Version, parse from .constants import STR_OPERATION_TO_FUNC snake_case = parse(importlib.metadata.version("""torch""")) def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Version] , snake_case__ :str , snake_case__ :str ) -> int: if operation not in STR_OPERATION_TO_FUNC.keys(): raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" ) _lowercase = STR_OPERATION_TO_FUNC[operation] if isinstance(snake_case__ , snake_case__ ): _lowercase = parse(importlib.metadata.version(snake_case__ ) ) return operation(snake_case__ , parse(snake_case__ ) ) def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :str ) -> Dict: return compare_versions(snake_case__ , snake_case__ , snake_case__ )
67
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
652
0
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =seq_length __UpperCAmelCase =is_training __UpperCAmelCase =use_attention_mask __UpperCAmelCase =use_token_type_ids __UpperCAmelCase =use_labels __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase 
=type_vocab_size __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =initializer_range __UpperCAmelCase =num_choices def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase =None if self.use_attention_mask: __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase =None if self.use_token_type_ids: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self : List[str] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase =True __UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( UpperCamelCase , unittest.TestCase ): """simple 
docstring""" lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =FlaxRobertaModelTester(self ) @slow def _a ( self : Optional[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
68
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ={"""vocab_file""": """vocab.txt"""} a ={ """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } a ={ """openbmb/cpm-ant-10b""": 1024, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: __lowerCamelCase : int = collections.OrderedDict() with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader: __lowerCamelCase : Optional[int] = reader.readlines() for index, token in enumerate(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = token.rstrip('\n' ) __lowerCamelCase : Union[str, Any] = index return vocab class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="<unk>" ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_0): __lowerCamelCase : str = vocab __lowerCamelCase : Dict = unk_token __lowerCamelCase : int = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : int = list(SCREAMING_SNAKE_CASE__) if len(SCREAMING_SNAKE_CASE__) > self.max_input_chars_per_word: return [self.unk_token] __lowerCamelCase : Tuple = 0 __lowerCamelCase : str = [] while start < len(SCREAMING_SNAKE_CASE__): __lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = None while start < end: __lowerCamelCase : Any = ''.join(chars[start:end]) if substr in self.vocab: __lowerCamelCase : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = end return sub_tokens class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = 
VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = False def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,): requires_backends(self ,['jieba']) super().__init__( bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Optional[Any] = bod_token __lowerCamelCase : Dict = eod_token __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.encoder[space_token] __lowerCamelCase : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1])) __lowerCamelCase : int = {v: k for k, v in self.encoder.items()} __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token) @property def lowerCAmelCase ( self : List[Any]): return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : Tuple): return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : Union[str, Any]): return self.encoder["\n"] @property def lowerCAmelCase ( self : 
str): return len(self.encoder) def lowerCAmelCase ( self : str): return dict(self.encoder ,**self.added_tokens_encoder) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Any = [] for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)) return output_tokens def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0] __lowerCamelCase : str = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]): return token in self.encoder def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]): return "".join(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token)) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if os.path.isdir(SCREAMING_SNAKE_CASE__): __lowerCamelCase : Any = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory __lowerCamelCase : Any = 0 if " " in self.encoder: __lowerCamelCase : Any = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: __lowerCamelCase : str = self.encoder['\n'] del self.encoder["\n"] __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() 
,key=lambda SCREAMING_SNAKE_CASE__: x[1])) with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!') __lowerCamelCase : Any = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
652
0
'''simple docstring''' def __UpperCAmelCase ( _UpperCAmelCase : int = 60_08_51_47_51_43 ) -> int: try: __snake_case = int(_UpperCAmelCase ) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int." ) if n <= 0: raise ValueError("Parameter n must be greater than or equal to one." ) __snake_case = 1 __snake_case = 2 while i * i <= n: while n % i == 0: __snake_case = i n //= i i += 1 if n > 1: __snake_case = n return int(_UpperCAmelCase ) if __name__ == "__main__": print(F'''{solution() = }''')
69
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a ={"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
652
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase : Any = { "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"], "tokenization_m2m_100": ["M2M100Tokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] = [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", "M2M100PreTrainedModel", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : int = (UnCLIPScheduler,) def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Any = { 'num_train_timesteps': 1_0_0_0, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**SCREAMING_SNAKE_CASE__) return config def lowerCAmelCase ( self : Optional[Any]): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[Any]): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Union[str, Any]): for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple): for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any]): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase : Optional[int] = self.scheduler_classes[0] __lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log') __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__) assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5 def lowerCAmelCase ( self : Any): __lowerCamelCase : Dict = self.scheduler_classes[0] __lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range') __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = 0.5 assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5 assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5 assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5 def lowerCAmelCase ( self : List[str]): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : str = self.get_scheduler_config() __lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = scheduler.timesteps __lowerCamelCase : Union[str, Any] = self.dummy_model() __lowerCamelCase : Optional[Any] = self.dummy_sample_deter __lowerCamelCase : List[str] = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) # 2. 
predict previous mean of sample x_t-1 __lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Optional[Any] = pred_prev_sample __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 252.2682495) < 1E-2 assert abs(result_mean.item() - 0.3284743) < 1E-3 def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : List[Any] = self.get_scheduler_config() __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) scheduler.set_timesteps(2_5) __lowerCamelCase : int = scheduler.timesteps __lowerCamelCase : Tuple = self.dummy_model() __lowerCamelCase : Any = self.dummy_sample_deter __lowerCamelCase : Any = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if i + 1 == timesteps.shape[0]: __lowerCamelCase : Optional[Any] = None else: __lowerCamelCase : Union[str, Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowerCamelCase : int = scheduler.step( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Union[str, Any] = pred_prev_sample __lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 258.2044983) < 1E-2 assert abs(result_mean.item() - 0.3362038) < 1E-3 def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Union[str, Any]): pass
652
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""", """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""", """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json""" ), } class _snake_case (__SCREAMING_SNAKE_CASE): __A : Tuple ="longformer" def __init__( self ,_snake_case = 5_12 ,_snake_case = 2 ,_snake_case = 1 ,_snake_case = 0 ,_snake_case = 2 ,_snake_case = 3_05_22 ,_snake_case = 7_68 ,_snake_case = 12 ,_snake_case = 12 ,_snake_case = 30_72 ,_snake_case = "gelu" ,_snake_case = 0.1 ,_snake_case = 0.1 ,_snake_case = 5_12 ,_snake_case = 2 ,_snake_case = 0.02 ,_snake_case = 1E-12 ,_snake_case = False ,**_snake_case ,): super().__init__(pad_token_id=_snake_case ,**_snake_case ) UpperCAmelCase_ : Union[str, Any] = attention_window UpperCAmelCase_ : str = sep_token_id UpperCAmelCase_ : List[str] = bos_token_id UpperCAmelCase_ : Optional[Any] = eos_token_id UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Union[str, Any] = 
num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : List[str] = intermediate_size UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = max_position_embeddings UpperCAmelCase_ : Optional[Any] = type_vocab_size UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : int = layer_norm_eps UpperCAmelCase_ : Optional[int] = onnx_export class _snake_case (__SCREAMING_SNAKE_CASE): def __init__( self ,_snake_case ,_snake_case = "default" ,_snake_case = None ): super().__init__(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : int = True @property def UpperCamelCase__ ( self ): if self.task == "multiple-choice": UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("global_attention_mask", dynamic_axis), ] ) @property def UpperCamelCase__ ( self ): UpperCAmelCase_ : Optional[Any] = super().outputs if self.task == "default": UpperCAmelCase_ : str = {0: "batch"} return outputs @property def UpperCamelCase__ ( self ): return 1E-4 @property def UpperCamelCase__ ( self ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset ,14 ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = -1 ,_snake_case = -1 ,_snake_case = False ,_snake_case = None ,): UpperCAmelCase_ : Any = super().generate_dummy_inputs( preprocessor=_snake_case ,batch_size=_snake_case ,seq_length=_snake_case ,is_pair=_snake_case ,framework=_snake_case ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly UpperCAmelCase_ : Tuple = torch.zeros_like(inputs["input_ids"] ) # make every second token 
global UpperCAmelCase_ : List[str] = 1 return inputs
71
from ...configuration_utils import PretrainedConfig from ...utils import logging a =logging.get_logger(__name__) a ={ """caidas/swin2sr-classicalsr-x2-64""": ( """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json""" ), } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Optional[int] = '''swin2sr''' _UpperCAmelCase : Any = { '''hidden_size''': '''embed_dim''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=1_8_0 ,SCREAMING_SNAKE_CASE__ : Any=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : int=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : int="1conv" ,SCREAMING_SNAKE_CASE__ : Optional[int]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = image_size __lowerCamelCase : str = patch_size __lowerCamelCase : List[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : Dict = depths __lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = num_heads __lowerCamelCase : Tuple = window_size __lowerCamelCase : Dict = mlp_ratio __lowerCamelCase : str = qkv_bias __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate 
__lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : Dict = use_absolute_embeddings __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : List[Any] = upscale __lowerCamelCase : List[Any] = img_range __lowerCamelCase : List[str] = resi_connection __lowerCamelCase : Union[str, Any] = upsampler
652
0
'''simple docstring''' import doctest from collections import deque import numpy as np class __magic_name__ : def __init__( self ): lowercase =[2, 1, 2, -1] lowercase =[1, 2, 3, 4] def _A( self ): lowercase =len(self.first_signal ) lowercase =len(self.second_signal ) lowercase =max(snake_case_ , snake_case_ ) # create a zero matrix of max_length x max_length lowercase =[[0] * max_length for i in range(snake_case_ )] # fills the smaller signal with zeros to make both signals of same length if length_first_signal < length_second_signal: self.first_signal += [0] * (max_length - length_first_signal) elif length_first_signal > length_second_signal: self.second_signal += [0] * (max_length - length_second_signal) for i in range(snake_case_ ): lowercase =deque(self.second_signal ) rotated_signal.rotate(snake_case_ ) for j, item in enumerate(snake_case_ ): matrix[i][j] += item # multiply the matrix with the first signal lowercase =np.matmul(np.transpose(snake_case_ ) , np.transpose(self.first_signal ) ) # rounding-off to two decimal places return [round(snake_case_ , 2 ) for i in final_signal] if __name__ == "__main__": doctest.testmod()
72
# Utility module for RAG-style seq2seq fine-tuning: line-oriented dataset,
# batch trimming/collation, git/JSON/pickle helpers and SQuAD-style metrics
# (grounded by the Dataset subclass, the F1/EM helpers and the git helpers
# below).
#
# NOTE(review): this chunk is machine-obfuscated and does not run as-is —
# functions declare several parameters with the same name `lowerCamelCase__`
# (a SyntaxError), and assignments bind one throwaway local
# (`__lowerCamelCase`) while later expressions read the original names
# (`tokenizer`, `line`, `repo`, `exclude`, ...), which are therefore unbound.
# Code is left byte-identical; only comments/docstrings were added.
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

# NOTE(review): `TaTokenizer` looks like a mangled `T5Tokenizer` — verify.
from transformers import BartTokenizer, RagTokenizer, TaTokenizer


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__="pt" ) -> Dict:
    # Tokenize a single text line with optional max-length padding
    # (referred to elsewhere in this module as `encode_line`).
    __lowerCamelCase : Any = {'add_prefix_space': True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(' ' ) else {}
    __lowerCamelCase : int = padding_side
    return tokenizer(
        [line] , max_length=lowerCamelCase__ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ) -> List[str]:
    # Drop columns that are entirely pad tokens (referred to elsewhere as
    # `trim_batch`); keeps the attention mask in sync when provided.
    __lowerCamelCase : List[str] = input_ids.ne(lowerCamelCase__ ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class A_ ( SCREAMING_SNAKE_CASE ):
    # Line-aligned seq2seq dataset: reads `<type_path>.source` / `.target`
    # files lazily via linecache and returns tokenized pairs.
    # NOTE(review): the base name `SCREAMING_SNAKE_CASE` is undefined here —
    # presumably torch's `Dataset` (imported above); verify.

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,):
        super().__init__()
        __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source')
        __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target')
        __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file)
        __lowerCamelCase : List[Any] = max_source_length
        __lowerCamelCase : List[str] = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        __lowerCamelCase : Any = tokenizer
        __lowerCamelCase : Optional[int] = prefix
        if n_obs is not None:
            # Optionally truncate the dataset to the first n_obs examples.
            __lowerCamelCase : Dict = self.src_lens[:n_obs]
        __lowerCamelCase : str = src_lang
        __lowerCamelCase : Any = tgt_lang

    def __len__( self : Tuple):
        # Dataset size = number of source lines.
        return len(self.src_lens)

    def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
        # Return one tokenized (source, target) pair as tensors.
        __lowerCamelCase : Dict = index + 1  # linecache starts at 1
        __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        __lowerCamelCase : Dict = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        )
        __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right')
        __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_target_length ,'right')

        __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int):
        # Per-line character lengths of a file (called via `get_char_lens`
        # above, though that attribute name is not defined in this chunk).
        return [len(SCREAMING_SNAKE_CASE__) for x in Path(SCREAMING_SNAKE_CASE__).open().readlines()]

    # NOTE(review): this second `lowerCAmelCase` shadows the staticmethod
    # above in the class namespace.
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
        # Collate a list of __getitem__ dicts into one padded, trimmed batch.
        __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch])
        __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch])
        __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch])
        __lowerCamelCase : Optional[int] = (
            self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = (
            self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch


# Module logger.
a = getLogger(__name__)


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    # Flatten a list of lists into one list.
    return list(itertools.chain.from_iterable(lowerCamelCase__ ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    # Save git repository metadata to <folder>/git_log.json.
    __lowerCamelCase : str = get_git_info()
    save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , 'git_log.json' ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ) -> List[str]:
    # Dump an object to a JSON file (indent defaults to 4).
    with open(lowerCamelCase__ , 'w' ) as f:
        json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    # Load and return the contents of a JSON file.
    with open(lowerCamelCase__ ) as f:
        return json.load(lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
    # Collect current git repo id/sha/branch plus hostname.
    __lowerCamelCase : str = git.Repo(search_parent_directories=lowerCamelCase__ )
    __lowerCamelCase : Any = {
        'repo_id': str(lowerCamelCase__ ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List:
    # list(map(f, xs)) convenience wrapper.
    return list(map(lowerCamelCase__ , lowerCamelCase__ ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
    # Pickle an object to the given path.
    with open(lowerCamelCase__ , 'wb' ) as f:
        return pickle.dump(lowerCamelCase__ , lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    # SQuAD-style answer normalization: lowercase, strip punctuation,
    # drop articles, collapse whitespace (called as `normalize_answer` below).
    def remove_articles(lowerCamelCase__ ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , lowerCamelCase__ )

    def white_space_fix(lowerCamelCase__ ):
        return " ".join(text.split() )

    def remove_punc(lowerCamelCase__ ):
        __lowerCamelCase : Dict = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(lowerCamelCase__ ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
    # Token-overlap F1 between a prediction and a gold answer.
    __lowerCamelCase : str = normalize_answer(lowerCamelCase__ ).split()
    __lowerCamelCase : Optional[int] = normalize_answer(lowerCamelCase__ ).split()
    __lowerCamelCase : Union[str, Any] = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ )
    __lowerCamelCase : Any = sum(common.values() )
    if num_same == 0:
        return 0
    __lowerCamelCase : List[Any] = 1.0 * num_same / len(lowerCamelCase__ )
    __lowerCamelCase : int = 1.0 * num_same / len(lowerCamelCase__ )
    __lowerCamelCase : Optional[Any] = (2 * precision * recall) / (precision + recall)
    return fa


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    # Exact-match after normalization (called as `exact_match_score` below).
    return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    # Average exact-match score over aligned hypothesis/reference lists.
    assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
    __lowerCamelCase : Dict = 0
    for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ):
        em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ )
    if len(lowerCamelCase__ ) > 0:
        em /= len(lowerCamelCase__ )
    return {"em": em}


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    # Whether a model prefix identifies a RAG model.
    return model_prefix.startswith('rag' )


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
    # Move extra hparams onto the model config, translating names where the
    # config uses an equivalent attribute.
    __lowerCamelCase : Optional[int] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    __lowerCamelCase : List[str] = 'dropout_rate'
    for p in extra_params:
        if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
            if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase__ ) )
                delattr(lowerCamelCase__ , lowerCamelCase__ )
                continue
            __lowerCamelCase : List[Any] = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p]
            setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
            delattr(lowerCamelCase__ , lowerCamelCase__ )
    return hparams, config
# NOTE(review): stray integer-literal expression statements — dataset/label
# residue, not code; confirm and remove.
652
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available a_ : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys a_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
# PyTorch MobileNetV1 modeling module (grounded by the MobileNetV1 docstrings,
# the `configuration_mobilenet_va` import and the TF checkpoint-name strings
# below).
#
# NOTE(review): this chunk is machine-obfuscated and does not run as-is —
# duplicate `SCREAMING_SNAKE_CASE__` parameters are a SyntaxError, every class
# is named `A_` (later definitions shadow earlier ones), assignments bind one
# throwaway local (`__lowerCamelCase`) while later expressions read the
# original names (`backbone`, `pointer`, `strides`, ...), and several
# attribute names are mangled (`nn.Convad`, `nn.BatchNormad`,
# `nn.AdaptiveAvgPoolad` — presumably the 2d variants). Code is left
# byte-identical; only comments were added.
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig


a = logging.get_logger(__name__)

# General docstring
a = """MobileNetV1Config"""

# Base docstring
a = """google/mobilenet_v1_1.0_224"""
a = [1, 1024, 7, 7]

# Image classification docstring
a = """google/mobilenet_v1_1.0_224"""
a = """tabby, tabby cat"""

# NOTE(review): all of the doc constants above share the single name `a`,
# so only the last assignment survives.
a = [
    """google/mobilenet_v1_1.0_224""",
    """google/mobilenet_v1_0.75_192""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str:
    # Build the TensorFlow-variable-name -> PyTorch-parameter map used when
    # porting TF MobileNetV1 checkpoints (called below as
    # `_build_tf_to_pytorch_map`).
    __lowerCamelCase : str = {}
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : int = model.mobilenet_va
    else:
        __lowerCamelCase : List[str] = model

    # Stem convolution + batch-norm parameters.
    __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/'
    __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight
    __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias
    __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight
    __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean
    __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var

    # 13 depthwise/pointwise layer pairs.
    for i in range(1_3 ):
        __lowerCamelCase : Any = i + 1
        __lowerCamelCase : Union[str, Any] = i * 2

        __lowerCamelCase : Optional[Any] = backbone.layer[pt_index]
        __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        __lowerCamelCase : Tuple = pointer.convolution.weight
        __lowerCamelCase : Optional[Any] = pointer.normalization.bias
        __lowerCamelCase : Union[str, Any] = pointer.normalization.weight
        __lowerCamelCase : List[str] = pointer.normalization.running_mean
        __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var

        __lowerCamelCase : int = backbone.layer[pt_index + 1]
        __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        __lowerCamelCase : Optional[Any] = pointer.convolution.weight
        __lowerCamelCase : Any = pointer.normalization.bias
        __lowerCamelCase : str = pointer.normalization.weight
        __lowerCamelCase : Dict = pointer.normalization.running_mean
        __lowerCamelCase : List[str] = pointer.normalization.running_var

    # Classifier head (only for the classification model).
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        __lowerCamelCase : Any = model.classifier.weight
        __lowerCamelCase : int = model.classifier.bias
    return tf_to_pt_map


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
    # Load a TF MobileNetV1 checkpoint into a PyTorch model, transposing
    # weight layouts and dropping optimizer slot variables.
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.'
        )
        raise

    # Load weights from TF model
    __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ )
    __lowerCamelCase : List[str] = {}
    for name, shape in init_vars:
        logger.info(F"Loading TF weight {name} with shape {shape}" )
        __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
        __lowerCamelCase : List[Any] = array

    # Build TF to PyTorch weights loading map
    __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    for name, pointer in tf_to_pt_map.items():
        logger.info(F"Importing {name}" )
        if name not in tf_weights:
            logger.info(F"{name} not in tf pre-trained weights, skipping" )
            continue

        __lowerCamelCase : Optional[int] = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            # TF depthwise layout (H, W, in, mult) -> PyTorch (in, mult, H, W).
            __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                __lowerCamelCase : Any = array.squeeze().transpose()
            else:
                __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) )

        if pointer.shape != array.shape:
            raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )

        logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
        __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ )

        # Remove the weight and its optimizer slot variables from the pool.
        tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ )
        tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ )

    logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> torch.Tensor:
    # Apply TensorFlow-style "SAME" padding before a convolution
    # (referred to below as `apply_tf_padding`).
    __lowerCamelCase , __lowerCamelCase : int = features.shape[-2:]
    __lowerCamelCase , __lowerCamelCase : List[str] = conv_layer.stride
    __lowerCamelCase , __lowerCamelCase : str = conv_layer.kernel_size

    if in_height % stride_height == 0:
        __lowerCamelCase : Optional[int] = max(kernel_height - stride_height , 0 )
    else:
        __lowerCamelCase : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 )

    if in_width % stride_width == 0:
        __lowerCamelCase : List[str] = max(kernel_width - stride_width , 0 )
    else:
        __lowerCamelCase : List[str] = max(kernel_width - (in_width % stride_width) , 0 )

    # Split the total padding asymmetrically (extra pixel goes right/bottom).
    __lowerCamelCase : List[str] = pad_along_width // 2
    __lowerCamelCase : Optional[int] = pad_along_width - pad_left
    __lowerCamelCase : Any = pad_along_height // 2
    __lowerCamelCase : List[Any] = pad_along_height - pad_top

    __lowerCamelCase : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(lowerCamelCase__ , lowerCamelCase__ , 'constant' , 0.0 )


class A_ ( nn.Module ):
    # Conv + optional batch-norm + optional activation building block
    # (referred to below as `MobileNetVaConvLayer`).

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[bool] = True ,SCREAMING_SNAKE_CASE__ : Optional[bool or str] = True ,):
        super().__init__()
        __lowerCamelCase : Dict = config

        if in_channels % groups != 0:
            raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF padding the conv itself gets padding 0 and the explicit
        # "SAME" padding is applied in forward instead.
        __lowerCamelCase : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        __lowerCamelCase : Optional[int] = nn.Convad(
            in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ ,padding_mode='zeros' ,)

        if use_normalization:
            __lowerCamelCase : Optional[int] = nn.BatchNormad(
                num_features=SCREAMING_SNAKE_CASE__ ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=SCREAMING_SNAKE_CASE__ ,track_running_stats=SCREAMING_SNAKE_CASE__ ,)
        else:
            __lowerCamelCase : Dict = None

        if use_activation:
            # Activation may be given per-layer as a string, else fall back to
            # the config-wide hidden_act.
            if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                __lowerCamelCase : Dict = ACTaFN[use_activation]
            elif isinstance(config.hidden_act ,SCREAMING_SNAKE_CASE__):
                __lowerCamelCase : str = ACTaFN[config.hidden_act]
            else:
                __lowerCamelCase : List[str] = config.hidden_act
        else:
            __lowerCamelCase : List[str] = None

    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.Tensor):
        # Forward pass: optional TF padding, conv, optional norm, optional act.
        if self.config.tf_padding:
            __lowerCamelCase : Any = apply_tf_padding(SCREAMING_SNAKE_CASE__ ,self.convolution)
        __lowerCamelCase : Optional[int] = self.convolution(SCREAMING_SNAKE_CASE__)
        if self.normalization is not None:
            __lowerCamelCase : Dict = self.normalization(SCREAMING_SNAKE_CASE__)
        if self.activation is not None:
            __lowerCamelCase : List[str] = self.activation(SCREAMING_SNAKE_CASE__)
        return features


class A_ ( SCREAMING_SNAKE_CASE ):
    # PreTrainedModel base class for MobileNetV1: config class, TF weight
    # loader hook and weight initialization.
    _UpperCAmelCase : Union[str, Any] = MobileNetVaConfig
    _UpperCAmelCase : List[str] = load_tf_weights_in_mobilenet_va
    _UpperCAmelCase : List[str] = '''mobilenet_v1'''
    _UpperCAmelCase : Any = '''pixel_values'''
    _UpperCAmelCase : int = False

    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[nn.Linear, nn.Convad]):
        # Standard weight init: normal for linear/conv, (0, 1) for batch-norm.
        if isinstance(SCREAMING_SNAKE_CASE__ ,(nn.Linear, nn.Convad)):
            module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(SCREAMING_SNAKE_CASE__ ,nn.BatchNormad):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


a = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

a = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    # Backbone model: stem conv + 13 depthwise/pointwise pairs + optional
    # global average pooling.

    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True):
        super().__init__(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = config

        # Stem starts at 32 channels, scaled by the depth multiplier.
        __lowerCamelCase : Optional[int] = 3_2
        __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth)

        __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer(
            SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,)

        # Per-stage strides; channel count doubles on each stride-2 stage.
        __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        __lowerCamelCase : str = nn.ModuleList()
        for i in range(1_3):
            __lowerCamelCase : str = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth)

            # Depthwise (groups=in_channels) 3x3 followed by pointwise 1x1.
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,))
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,))

        __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        # Attention-head pruning is not supported for MobileNetV1.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        # Forward pass returning last hidden state, pooled output and
        # (optionally) all intermediate hidden states.
        __lowerCamelCase : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : Optional[Any] = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__)

            if output_hidden_states:
                __lowerCamelCase : Any = all_hidden_states + (hidden_states,)

        __lowerCamelCase : Optional[Any] = hidden_states

        if self.pooler is not None:
            # Global average pool -> (batch, channels).
            __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1)
        else:
            __lowerCamelCase : List[str] = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__ ,)


@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    # Image-classification head: backbone + dropout + linear classifier.

    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig):
        super().__init__(SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : Optional[Any] = config.num_labels
        __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        # Forward pass; when labels are given, picks the problem type
        # (regression / single-label / multi-label) and computes the loss.
        __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict

        __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__)

        __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]

        __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__))

        __lowerCamelCase : List[str] = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowerCamelCase : Dict = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowerCamelCase : int = 'single_label_classification'
                else:
                    __lowerCamelCase : Tuple = 'multi_label_classification'

            if self.config.problem_type == "regression":
                __lowerCamelCase : Tuple = MSELoss()
                if self.num_labels == 1:
                    __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze())
                else:
                    __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            elif self.config.problem_type == "single_label_classification":
                __lowerCamelCase : List[str] = CrossEntropyLoss()
                __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __lowerCamelCase : int = BCEWithLogitsLoss()
                __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

        if not return_dict:
            __lowerCamelCase : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
# NOTE(review): stray integer-literal expression statements — dataset/label
# residue, not code; confirm and remove.
652
0
# Fast-test suite for the DeepFloyd IF inpainting super-resolution pipeline
# (grounded by the IFInpaintingSuperResolutionPipeline import and the mixin
# hooks exercised below).
#
# NOTE(review): this chunk is machine-obfuscated — both mixin base classes
# are the undefined name `lowerCAmelCase__`, the class attributes all share
# the name `lowerCAmelCase_` (later assignments shadow earlier ones), all
# methods share the name `UpperCAmelCase__` (only the last survives on the
# class), and method bodies read names (`image`, `generator`, `inputs`, ...)
# that are never bound. Code is left byte-identical; only comments/docstrings
# were improved.
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
    """Fast pipeline tests for IFInpaintingSuperResolutionPipeline."""

    lowerCAmelCase_ = IFInpaintingSuperResolutionPipeline
    # width/height are fixed by the super-resolution stage.
    lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    # This pipeline additionally needs the low-res original image per batch.
    lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Build the dummy super-resolution pipeline components for testing."""
        return self._get_superresolution_dummy_components()

    def UpperCAmelCase__ ( self : Any , _A : str , _A : Tuple=0 ):
        """Build a deterministic dummy input dict (image, original image, mask,
        generator and prompt) seeded with the given value."""
        # torch.Generator(device='mps') is unsupported; fall back to the
        # global manual seed there.
        if str(_A ).startswith('''mps''' ):
            __SCREAMING_SNAKE_CASE : int = torch.manual_seed(_A )
        else:
            __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=_A ).manual_seed(_A )

        __SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(_A ) ).to(_A )
        __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        __SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )

        __SCREAMING_SNAKE_CASE : str = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,
    )
    def UpperCAmelCase__ ( self : Tuple ):
        """Check xformers attention matches the default path within 1e-3."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def UpperCAmelCase__ ( self : int ):
        """Check saving/loading with optional components removed."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def UpperCAmelCase__ ( self : str ):
        """Check float16 save/load round-trip within a loose 1e-1 tolerance."""
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def UpperCAmelCase__ ( self : Tuple ):
        """Check attention slicing matches the default forward within 1e-2."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def UpperCAmelCase__ ( self : Any ):
        """Check local save/load round-trip."""
        self._test_save_load_local()

    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Check batched vs single inference agree within 1e-2."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
74
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = cva.getAffineTransform(lowerCamelCase__ , lowerCamelCase__ ) return cva.warpAffine(lowerCamelCase__ , lowerCamelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image a =cva.imread( str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""") ) # turn image in gray scale value a =cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape a , a =gray_img.shape # set different points to rotate image a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa) a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa) a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa) a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list a =[ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations a =plt.figure(1) a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""") plt.title(titles[i]) plt.axis("""off""") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
# NOTE(review): stray integer-literal expression statements — dataset/label
# residue, not code; confirm and remove.
652
0
'''simple docstring''' import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: '''))) print('''Googling.....''') UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100""" UpperCamelCase__ = requests.get( url, headers={'''User-Agent''': str(UserAgent().random)}, ) try: UpperCamelCase__ = ( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''yuRUbf'''}) .find('''a''') .get('''href''') ) except AttributeError: UpperCamelCase__ = parse_qs( BeautifulSoup(res.text, '''html.parser''') .find('''div''', attrs={'''class''': '''kCrYT'''}) .find('''a''') .get('''href''') )['''url'''][0] webbrowser.open(link)
75
import math def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : List[str] = F"Input value of [number={number}] must be an integer" raise TypeError(lowerCamelCase__ ) if number < 1: __lowerCamelCase : int = F"Input value of [number={number}] must be > 0" raise ValueError(lowerCamelCase__ ) elif number == 1: return 3 elif number == 2: return 5 else: __lowerCamelCase : Any = int(math.log(number // 3 , 2 ) ) + 2 __lowerCamelCase : List[Any] = [3, 5] __lowerCamelCase : Union[str, Any] = 2 __lowerCamelCase : List[str] = 3 for block in range(1 , lowerCamelCase__ ): for _ in range(lowerCamelCase__ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): a =0 try: a =proth(number) except ValueError: print(F"""ValueError: there is no {number}th Proth number""") continue print(F"""The {number}th Proth number: {value}""")
652
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=[1, 1, 2] , UpperCamelCase_=1 , UpperCamelCase_=32 , UpperCamelCase_=4 , UpperCamelCase_=8 , UpperCamelCase_=37 , UpperCamelCase_="gelu_new" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=5_12 , UpperCamelCase_=3 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Tuple: __lowercase : int = parent __lowercase : Optional[int] = batch_size __lowercase : Dict = seq_length __lowercase : int = is_training __lowercase : int = use_input_mask __lowercase : str = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : Union[str, Any] = vocab_size __lowercase : Any = block_sizes __lowercase : Optional[Any] = num_decoder_layers __lowercase : Union[str, Any] = d_model __lowercase : str = n_head __lowercase : List[str] = d_head __lowercase : str = d_inner __lowercase : Tuple = hidden_act __lowercase : str = hidden_dropout __lowercase : Optional[Any] = attention_dropout __lowercase : Dict = activation_dropout __lowercase : Dict = max_position_embeddings __lowercase : int = type_vocab_size 
__lowercase : List[Any] = 2 __lowercase : Optional[int] = num_labels __lowercase : int = num_choices __lowercase : List[Any] = scope __lowercase : Optional[Any] = initializer_std # Used in the tests to check the size of the first attention layer __lowercase : List[str] = n_head # Used in the tests to check the size of the first hidden state __lowercase : Optional[int] = self.d_model # Used in the tests to check the number of output hidden states/attentions __lowercase : Any = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: __lowercase : Dict = self.num_hidden_layers + 2 def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Optional[int] = None if self.use_input_mask: __lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : Optional[int] = None if self.use_token_type_ids: __lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase : List[Any] = None __lowercase : str = None __lowercase : Any = None if self.use_labels: __lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : Optional[int] = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , 
activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Dict: __lowercase : Any = TFFunnelModel(config=UpperCamelCase_ ) __lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : str = model(UpperCamelCase_ ) __lowercase : int = [input_ids, input_mask] __lowercase : Optional[int] = model(UpperCamelCase_ ) __lowercase : Any = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __lowercase : int = False __lowercase : List[str] = TFFunnelModel(config=UpperCamelCase_ ) __lowercase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __lowercase : Tuple = False __lowercase : Any = TFFunnelModel(config=UpperCamelCase_ ) __lowercase : str = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]: __lowercase : Optional[int] = TFFunnelBaseModel(config=UpperCamelCase_ ) __lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : Any = model(UpperCamelCase_ ) __lowercase : List[Any] = [input_ids, input_mask] __lowercase : Any = model(UpperCamelCase_ ) __lowercase : Any = model(UpperCamelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __lowercase : Any = False __lowercase : Optional[Any] = TFFunnelBaseModel(config=UpperCamelCase_ ) __lowercase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __lowercase : Any = False __lowercase : str = TFFunnelBaseModel(config=UpperCamelCase_ ) __lowercase : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[Any]: __lowercase : List[str] = TFFunnelForPreTraining(config=UpperCamelCase_ ) __lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : List[Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Any: __lowercase : str = TFFunnelForMaskedLM(config=UpperCamelCase_ ) __lowercase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : Any = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Tuple: __lowercase : int = self.num_labels __lowercase : List[str] = TFFunnelForSequenceClassification(config=UpperCamelCase_ ) __lowercase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : Tuple = 
model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Dict: __lowercase : Optional[int] = self.num_choices __lowercase : List[Any] = TFFunnelForMultipleChoice(config=UpperCamelCase_ ) __lowercase : Union[str, Any] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) ) __lowercase : Tuple = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) ) __lowercase : str = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) ) __lowercase : Optional[Any] = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __lowercase : List[str] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]: __lowercase : str = self.num_labels __lowercase : Optional[Any] = TFFunnelForTokenClassification(config=UpperCamelCase_ ) __lowercase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : str = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Optional[Any]: __lowercase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase_ ) __lowercase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase : List[Any] = model(UpperCamelCase_ ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Tuple = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) : str = config_and_inputs __lowercase : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase_ ( snake_case , snake_case , unittest.TestCase ): UpperCamelCase =( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) UpperCamelCase =( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase =False UpperCamelCase =False def _lowerCamelCase ( self ) -> int: __lowercase : Optional[int] = TFFunnelModelTester(self ) __lowercase : Any = ConfigTester(self , config_class=UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCamelCase ( self ) -> Any: __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Dict: __lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: __lowercase : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Optional[int]: __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[Any]: __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ ) @require_tf class UpperCAmelCase_ ( snake_case , unittest.TestCase ): UpperCamelCase =( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) UpperCamelCase =False UpperCamelCase =False def _lowerCamelCase ( self ) -> str: __lowercase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase_ ) __lowercase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: self.config_tester.run_common_tests() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: __lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Tuple: __lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
76
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A_ ( unittest.TestCase ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __lowerCamelCase : str = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = min_resolution __lowerCamelCase : Tuple = max_resolution __lowerCamelCase : Dict = do_resize __lowerCamelCase : List[Any] = size __lowerCamelCase : Tuple = do_normalize __lowerCamelCase : Any = image_mean __lowerCamelCase : List[str] = image_std __lowerCamelCase : List[Any] = do_rescale __lowerCamelCase : str = rescale_factor __lowerCamelCase : Tuple = do_pad def lowerCAmelCase ( self : Dict): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, 
"do_pad": self.do_pad, } def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False): if not batched: __lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image): __lowerCamelCase , __lowerCamelCase : Any = image.size else: __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w) __lowerCamelCase : Tuple = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Union[str, Any] = self.size['shortest_edge'] __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h) else: __lowerCamelCase : List[Any] = self.size['shortest_edge'] __lowerCamelCase : Optional[int] = self.size['shortest_edge'] else: __lowerCamelCase : List[str] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0] __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = DetaImageProcessingTester(self) @property def lowerCAmelCase ( self : Any): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Dict): __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ 
,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester 
,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : int): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ 
,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def lowerCAmelCase ( self : Optional[Any]): # prepare image and target __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f: __lowerCamelCase : List[str] = json.loads(f.read()) __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __lowerCamelCase : Optional[int] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] 
,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify orig_size __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__)) @slow def lowerCAmelCase ( self : str): # prepare image, target and masks_path __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f: __lowerCamelCase : Tuple = json.loads(f.read()) __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic') __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : Tuple = torch.Size([6, 4]) 
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : int = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify masks __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__) # verify orig_size __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
652
0
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError("multiplicative_persistence() only accepts integral values" ) if num < 0: raise ValueError("multiplicative_persistence() does not accept negative values" ) __UpperCAmelCase : List[Any] = 0 __UpperCAmelCase : str = str(UpperCamelCase ) while len(UpperCamelCase ) != 1: __UpperCAmelCase : Any = [int(UpperCamelCase ) for i in num_string] __UpperCAmelCase : List[str] = 1 for i in range(0 , len(UpperCamelCase ) ): total *= numbers[i] __UpperCAmelCase : Optional[int] = str(UpperCamelCase ) steps += 1 return steps def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): raise ValueError("additive_persistence() only accepts integral values" ) if num < 0: raise ValueError("additive_persistence() does not accept negative values" ) __UpperCAmelCase : Dict = 0 __UpperCAmelCase : List[str] = str(UpperCamelCase ) while len(UpperCamelCase ) != 1: __UpperCAmelCase : Dict = [int(UpperCamelCase ) for i in num_string] __UpperCAmelCase : int = 0 for i in range(0 , len(UpperCamelCase ) ): total += numbers[i] __UpperCAmelCase : Optional[int] = str(UpperCamelCase ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
77
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : List[Any] = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCAmelCase : Tuple = False @property def lowerCAmelCase ( self : Tuple): return 3_2 @property def lowerCAmelCase ( self : List[Any]): return 3_2 @property def lowerCAmelCase ( self : str): return self.time_input_dim @property def lowerCAmelCase ( self : List[str]): return self.time_input_dim * 4 @property def lowerCAmelCase ( self : List[str]): return 1_0_0 @property def lowerCAmelCase ( self : Dict): torch.manual_seed(0) __lowerCamelCase : Optional[Any] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': 
self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__) return model @property def lowerCAmelCase ( self : Union[str, Any]): return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[Any]): torch.manual_seed(0) __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs) return model def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Tuple = self.dummy_unet __lowerCamelCase : List[Any] = self.dummy_movq __lowerCamelCase : str = DDIMScheduler( num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Dict = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0): __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to( SCREAMING_SNAKE_CASE__) # create hint __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) 
,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) if str(SCREAMING_SNAKE_CASE__).startswith('mps'): __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 6_4, 'width': 6_4, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Dict = 'cpu' __lowerCamelCase : Tuple = self.get_dummy_components() __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = output.images __lowerCamelCase : Tuple = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0] __lowerCamelCase : Dict = image[0, -3:, -3:, -1] __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCamelCase : List[str] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : int): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : int): __lowerCamelCase : List[Any] = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy') __lowerCamelCase : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png') __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0 __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0) __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa) pipe_prior.to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa) __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = 'A robot, 4k photo' __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior( SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase : Any = pipeline( image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,) __lowerCamelCase : List[Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
652
0
'''simple docstring''' from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase__ ) class __A ( UpperCamelCase__ ): def __init__(self : List[Any] , *__a : List[str] , **__a : int ): super().__init__(*__a , **__a ) requires_backends(self , "vision" ) self.check_model_type(__a ) def __call__(self : Optional[Any] , __a : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__a : List[str] ): return super().__call__(__a , **__a ) def _lowercase (self : Union[str, Any] , **__a : Optional[int] ): return {}, {}, {} def _lowercase (self : Union[str, Any] , __a : Union[str, Any] ): UpperCAmelCase_ = load_image(__a ) UpperCAmelCase_ = image.size UpperCAmelCase_ = self.image_processor(images=__a , return_tensors=self.framework ) return model_inputs def _lowercase (self : Any , __a : Tuple ): UpperCAmelCase_ = self.model(**__a ) return model_outputs def _lowercase (self : Optional[Any] , __a : List[str] ): UpperCAmelCase_ = model_outputs.predicted_depth UpperCAmelCase_ = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__a ) UpperCAmelCase_ = prediction.squeeze().cpu().numpy() UpperCAmelCase_ = (output * 255 / np.max(__a )).astype("uint8" ) UpperCAmelCase_ = Image.fromarray(__a ) UpperCAmelCase_ = {} UpperCAmelCase_ = predicted_depth UpperCAmelCase_ = depth return output_dict
78
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class A_ : _UpperCAmelCase : int = XGLMConfig _UpperCAmelCase : List[Any] = {} _UpperCAmelCase : Tuple = '''gelu''' def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,): __lowerCamelCase : List[str] = parent __lowerCamelCase : List[str] = batch_size __lowerCamelCase : str = seq_length __lowerCamelCase : Optional[Any] = is_training __lowerCamelCase : Any = use_input_mask __lowerCamelCase : str = use_labels __lowerCamelCase : Any = vocab_size __lowerCamelCase : Dict = d_model __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : List[Any] = num_attention_heads __lowerCamelCase : List[str] = ffn_dim __lowerCamelCase : Optional[Any] = activation_function __lowerCamelCase : Tuple = activation_dropout __lowerCamelCase : Union[str, Any] = attention_dropout __lowerCamelCase : List[str] = max_position_embeddings 
__lowerCamelCase : List[Any] = initializer_range __lowerCamelCase : Any = None __lowerCamelCase : List[str] = 0 __lowerCamelCase : List[str] = 2 __lowerCamelCase : Dict = 1 def lowerCAmelCase ( self : Any): return XGLMConfig.from_pretrained('facebook/xglm-564M') def lowerCAmelCase ( self : str): __lowerCamelCase : Any = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3) __lowerCamelCase : Dict = None if self.use_input_mask: __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) __lowerCamelCase : int = self.get_config() __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2) return ( config, input_ids, input_mask, head_mask, ) def lowerCAmelCase ( self : List[Any]): return XGLMConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,) def lowerCAmelCase ( self : int): __lowerCamelCase : int = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Any = config_and_inputs __lowerCamelCase : str = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else () _UpperCAmelCase : str = ( 
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) _UpperCAmelCase : Tuple = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Union[str, Any] = False def lowerCAmelCase ( self : Tuple): __lowerCamelCase : Tuple = TFXGLMModelTester(self) __lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7) def lowerCAmelCase ( self : List[Any]): self.config_tester.run_common_tests() @slow def lowerCAmelCase ( self : str): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.') def lowerCAmelCase ( self : Union[str, Any]): super().test_resize_token_embeddings() @require_tf class A_ ( unittest.TestCase ): @slow def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True): __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__) @slow def lowerCAmelCase ( self : List[str]): __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M') __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') tf.random.set_seed(0) __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf') __lowerCamelCase : List[Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0'): __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0]) __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) @slow def lowerCAmelCase ( self : Dict): __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M') __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M') __lowerCamelCase : Union[str, Any] = 'left' # use different length sentences to test batching __lowerCamelCase : List[str] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. 
The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = inputs['input_ids'] __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2) __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2) __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2) __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
652
0
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCAmelCase_ : def __UpperCAmelCase ( self , _lowerCAmelCase ): raise NotImplementedError() def __UpperCAmelCase ( self ): raise NotImplementedError() class UpperCAmelCase_ ( __lowerCamelCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = False , **_lowerCAmelCase ): UpperCAmelCase__ : Optional[int] = tokenizer UpperCAmelCase__ : Dict = skip_prompt UpperCAmelCase__ : Union[str, Any] = decode_kwargs # variables used in the streaming process UpperCAmelCase__ : str = [] UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ : Tuple = True def __UpperCAmelCase ( self , _lowerCAmelCase ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: UpperCAmelCase__ : Optional[int] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: UpperCAmelCase__ : int = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) UpperCAmelCase__ : int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): UpperCAmelCase__ : Optional[Any] = text[self.print_len :] UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : Union[str, Any] = 0 # If the last token is a CJK character, we print the characters. elif len(_lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): UpperCAmelCase__ : str = text[self.print_len :] self.print_len += len(_lowerCAmelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: UpperCAmelCase__ : Dict = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(_lowerCAmelCase ) self.on_finalized_text(_lowerCAmelCase ) def __UpperCAmelCase ( self ): # Flush the cache, if it exists if len(self.token_cache ) > 0: UpperCAmelCase__ : str = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) UpperCAmelCase__ : Tuple = text[self.print_len :] UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Union[str, Any] = 0 else: UpperCAmelCase__ : Dict = """""" UpperCAmelCase__ : str = True self.on_finalized_text(_lowerCAmelCase , stream_end=_lowerCAmelCase ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = False ): print(_lowerCAmelCase , flush=_lowerCAmelCase , end="""""" if not stream_end else None ) def __UpperCAmelCase ( self , _lowerCAmelCase ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False class UpperCAmelCase_ ( __lowerCamelCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = None , **_lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) UpperCAmelCase__ : Tuple = Queue() UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Tuple = timeout def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = False ): self.text_queue.put(_lowerCAmelCase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self ): return self def __UpperCAmelCase ( self ): UpperCAmelCase__ : List[str] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
79
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
652
0
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' __lowercase = s.rsplit(lowerCamelCase , lowerCamelCase ) return new.join(lowerCamelCase ) def snake_case ( lowerCamelCase ): '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = {} __lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __lowercase = key.replace(F'{group_key}.' , F'{group_key}.group.' ) if "res_path" in key: __lowercase = key.replace("""res_path.""" , """res_path.path.""" ) if key.endswith(""".w""" ): __lowercase = rreplace(lowerCamelCase , """.w""" , """.weight""" , 1 ) if key.endswith(""".b""" ): __lowercase = rreplace(lowerCamelCase , """.b""" , """.bias""" , 1 ) __lowercase = value.float() return upgrade @torch.no_grad() def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=True ): '''simple docstring''' from dall_e import Encoder __lowercase = Encoder() if os.path.exists(lowerCamelCase ): __lowercase = torch.load(lowerCamelCase ) else: __lowercase = torch.hub.load_state_dict_from_url(lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ): __lowercase = ckpt.state_dict() encoder.load_state_dict(lowerCamelCase ) if config_path is not None: __lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCamelCase ) else: __lowercase = FlavaImageCodebookConfig() __lowercase = FlavaImageCodebook(lowerCamelCase ).eval() __lowercase = encoder.state_dict() __lowercase = upgrade_state_dict(lowerCamelCase ) hf_model.load_state_dict(lowerCamelCase ) __lowercase = hf_model.state_dict() __lowercase = count_parameters(lowerCamelCase ) 
__lowercase = count_parameters(lowerCamelCase ) assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(lowerCamelCase ) else: return hf_state_dict if __name__ == "__main__": __UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") __UpperCamelCase : Optional[Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
80
"""TensorFlow RegNet model (restored from obfuscated names: `class A_` -> TFRegNet*,
`lowerCAmelCase` forward methods -> Keras `call`, `ConvaD`/`floataa` -> `Conv2D`/`float32`)."""

from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + optional activation, with explicit zero padding."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem): a single aggressive convolution over the pixel values."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """Projects residual features to the right size; also downsamples when `stride != 1`."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation attention: global pool -> bottleneck convs -> channel gate."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet "X" residual layer: 1x1 -> grouped 3x3 -> 1x1 convolutions plus a shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet "Y" residual layer: an X layer with a Squeeze-and-Excitation block inserted."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y layers, downsampling in the first one."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages, optionally collecting per-stage hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooler; shared by the base model and the classification head."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NOTE(review): obfuscation destroyed this property's name; it returns a
        # TensorSpec dict, matching the upstream `input_signature` contract.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
652
0
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLM-RoBERTa, covering the slow (SentencePiece) and fast (Rust) implementations."""

    # Attribute names are part of the TokenizerTesterMixin contract.
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                           ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            0,
            3293,
            83,
            10,
            4552,
            4989,
            7986,
            678,
            10,
            5915,
            111,
            179459,
            124850,
            4,
            6044,
            237,
            12,
            6,
            5,
            6,
            4,
            6780,
            705,
            15,
            1388,
            44,
            378,
            10114,
            711,
            152,
            20,
            6,
            5,
            22376,
            642,
            1221,
            15190,
            34153,
            450,
            5608,
            959,
            1119,
            57702,
            136,
            186,
            47,
            1098,
            29367,
            47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4,
            6044,
            237,
            6284,
            50901,
            528,
            31,
            90,
            34,
            927,
            2,
        ]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
81
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels a =object() # For specifying empty leaf dict `{}` a =object() def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int: __lowerCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) ) for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ): __lowerCamelCase : str = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )] if matches and all(lowerCamelCase__ ): return True return False def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: def replace(lowerCamelCase__ , lowerCamelCase__ ): for rule, replacement in rules: if _match(lowerCamelCase__ , lowerCamelCase__ ): return replacement return val return replace def SCREAMING_SNAKE_CASE__ ( ) -> str: return [ # embeddings (("transformer", "wpe", "embedding"), P('mp' , lowerCamelCase__ )), (("transformer", "wte", "embedding"), P('mp' , lowerCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase__ , 'mp' )), (("attention", "out_proj", "kernel"), P('mp' , lowerCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(lowerCamelCase__ , 'mp' )), (("mlp", "c_fc", "bias"), P('mp' )), (("mlp", "c_proj", "kernel"), P('mp' , lowerCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: __lowerCamelCase : List[str] = _get_partition_rules() __lowerCamelCase : Optional[Any] = _replacement_rules(lowerCamelCase__ ) __lowerCamelCase : Tuple = {k: _unmatched for k in flatten_dict(lowerCamelCase__ )} __lowerCamelCase : List[Any] = {k: replace(lowerCamelCase__ , lowerCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), 
"Incomplete partition spec." return freeze(unflatten_dict(lowerCamelCase__ ) )
652
0
"""simple docstring""" import unittest import numpy as np def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ): UpperCAmelCase_ = np.shape(lowerCAmelCase__ ) UpperCAmelCase_ = np.shape(lowerCAmelCase__ ) UpperCAmelCase_ = np.shape(lowerCAmelCase__ ) if shape_a[0] != shape_b[0]: UpperCAmelCase_ = ( "Expected the same number of rows for A and B. " f"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(lowerCAmelCase__ ) if shape_b[1] != shape_c[1]: UpperCAmelCase_ = ( "Expected the same number of columns for B and C. " f"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(lowerCAmelCase__ ) UpperCAmelCase_ = pseudo_inv if a_inv is None: try: UpperCAmelCase_ = np.linalg.inv(lowerCAmelCase__ ) except np.linalg.LinAlgError: raise ValueError( "Input matrix A is not invertible. Cannot compute Schur complement." ) return mat_c - mat_b.T @ a_inv @ mat_b class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowercase__ ( self : Any ) -> None: '''simple docstring''' UpperCAmelCase_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ = np.array([[2, 1], [6, 3]] ) UpperCAmelCase_ = schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase_ = np.block([[a, b], [b.T, c]] ) UpperCAmelCase_ = np.linalg.det(_UpperCAmelCase ) UpperCAmelCase_ = np.linalg.det(_UpperCAmelCase ) UpperCAmelCase_ = np.linalg.det(_UpperCAmelCase ) self.assertAlmostEqual(_UpperCAmelCase , det_a * det_s ) def lowercase__ ( self : Tuple ) -> None: '''simple docstring''' UpperCAmelCase_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ = np.array([[2, 1], [6, 3]] ) with self.assertRaises(_UpperCAmelCase ): schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowercase__ ( self : Tuple ) -> None: '''simple docstring''' 
UpperCAmelCase_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) UpperCAmelCase_ = np.array([[0, 3], [3, 0], [2, 3]] ) UpperCAmelCase_ = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(_UpperCAmelCase ): schur_complement(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
82
import math def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list: __lowerCamelCase : Union[str, Any] = [True] * n __lowerCamelCase : List[Any] = False __lowerCamelCase : int = False __lowerCamelCase : Any = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): __lowerCamelCase : List[str] = i * 2 while index < n: __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[Any] = index + i __lowerCamelCase : Optional[Any] = [2] for i in range(3 , lowerCamelCase__ , 2 ): if is_prime[i]: primes.append(lowerCamelCase__ ) return primes def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 9_9_9_9_6_6_6_6_3_3_3_3 ) -> int: __lowerCamelCase : Optional[Any] = math.floor(math.sqrt(lowerCamelCase__ ) ) + 1_0_0 __lowerCamelCase : Dict = prime_sieve(lowerCamelCase__ ) __lowerCamelCase : Tuple = 0 __lowerCamelCase : Dict = 0 __lowerCamelCase : Any = primes[prime_index] while (last_prime**2) <= limit: __lowerCamelCase : Any = primes[prime_index + 1] __lowerCamelCase : Optional[Any] = last_prime**2 __lowerCamelCase : Dict = next_prime**2 # Get numbers divisible by lps(current) __lowerCamelCase : Tuple = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __lowerCamelCase : Any = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __lowerCamelCase : List[Any] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __lowerCamelCase : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": 
print(solution())
652
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''', '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''', } class __snake_case ( _lowercase): snake_case__ : List[Any] = "falcon" snake_case__ : Any = ["past_key_values"] def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=6_5_0_2_4 , __lowerCAmelCase : Optional[Any]=4_5_4_4 , __lowerCAmelCase : Optional[Any]=3_2 , __lowerCAmelCase : List[Any]=7_1 , __lowerCAmelCase : Any=1E-5 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : int=None , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=1_1 , __lowerCAmelCase : Optional[Any]=1_1 , **__lowerCAmelCase : Dict , ): """simple docstring""" _lowerCamelCase : Union[str, Any] = vocab_size # Backward compatibility with n_embed kwarg _lowerCamelCase : Any = kwargs.pop('''n_embed''' , __lowerCAmelCase ) _lowerCamelCase : Tuple = hidden_size if n_embed is None else n_embed _lowerCamelCase : Optional[int] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : Dict = layer_norm_epsilon _lowerCamelCase : int = initializer_range _lowerCamelCase : List[Any] = use_cache _lowerCamelCase : Tuple = hidden_dropout _lowerCamelCase : List[str] = attention_dropout _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : Optional[Any] = eos_token_id _lowerCamelCase : Optional[int] = num_attention_heads if num_kv_heads is None else num_kv_heads _lowerCamelCase : Optional[Any] = alibi _lowerCamelCase : str = new_decoder_architecture _lowerCamelCase 
: Union[str, Any] = multi_query # Ignored when new_decoder_architecture is True _lowerCamelCase : List[Any] = parallel_attn _lowerCamelCase : str = bias super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return self.hidden_size // self.num_attention_heads @property def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" return not self.alibi
83
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : torch.FloatTensor _UpperCAmelCase : torch.FloatTensor class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Dict = 1 @register_to_config def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int = 2_0_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.15 ,SCREAMING_SNAKE_CASE__ : float = 0.01 ,SCREAMING_SNAKE_CASE__ : float = 1348.0 ,SCREAMING_SNAKE_CASE__ : float = 1E-5 ,SCREAMING_SNAKE_CASE__ : int = 1 ,): # standard deviation of the initial noise distribution __lowerCamelCase : int = sigma_max # setable values __lowerCamelCase : List[str] = None self.set_sigmas(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[int] = None): return sample def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None): __lowerCamelCase : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps __lowerCamelCase : Optional[int] = torch.linspace(1 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None): __lowerCamelCase : Optional[int] = sigma_min if sigma_min is not None else self.config.sigma_min __lowerCamelCase : Optional[int] = sigma_max if sigma_max is 
not None else self.config.sigma_max __lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) __lowerCamelCase : Optional[Any] = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE__) ,math.log(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)) __lowerCamelCase : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps]) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str]): return torch.where( timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') __lowerCamelCase : List[str] = timestep * torch.ones( sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0]) __lowerCamelCase : str = (timestep * (len(self.timesteps) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda __lowerCamelCase : Dict = timesteps.to(self.discrete_sigmas.device) __lowerCamelCase : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device) __lowerCamelCase : Optional[Any] = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).to(sample.device) __lowerCamelCase : int = torch.zeros_like(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the 
model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods __lowerCamelCase : int = diffusion.flatten() while len(diffusion.shape) < len(sample.shape): __lowerCamelCase : List[Any] = diffusion.unsqueeze(-1) __lowerCamelCase : Any = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of __lowerCamelCase : int = randn_tensor( sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__ ,device=sample.device ,dtype=sample.dtype) __lowerCamelCase : Optional[int] = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? __lowerCamelCase : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ ,prev_sample_mean=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,): if self.timesteps is None: raise ValueError( '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler') # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. 
of z" # sample noise for correction __lowerCamelCase : Optional[int] = randn_tensor(sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__).to(sample.device) # compute step size from the model_output, the noise, and the snr __lowerCamelCase : str = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean() __lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean() __lowerCamelCase : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 __lowerCamelCase : Optional[int] = step_size * torch.ones(sample.shape[0]).to(sample.device) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term __lowerCamelCase : Union[str, Any] = step_size.flatten() while len(step_size.shape) < len(sample.shape): __lowerCamelCase : List[str] = step_size.unsqueeze(-1) __lowerCamelCase : str = sample + step_size * model_output __lowerCamelCase : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,): # Make sure sigmas and timesteps have the same device and dtype as original_samples __lowerCamelCase : int = timesteps.to(original_samples.device) __lowerCamelCase : Any = self.discrete_sigmas.to(original_samples.device)[timesteps] __lowerCamelCase : Optional[Any] = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(SCREAMING_SNAKE_CASE__) * sigmas[:, None, None, None] ) __lowerCamelCase : str = noise + original_samples return noisy_samples def __len__( self : Optional[int]): return self.config.num_train_timesteps
652
0
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model UpperCAmelCase = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): if rng is None: lowercase = random.Random() lowercase = 1 for dim in shape: total_dims *= dim lowercase = [] for _ in range(__SCREAMING_SNAKE_CASE ): values.append(rng.randint(0 , vocab_size - 1 ) ) lowercase = np.array(__SCREAMING_SNAKE_CASE , dtype=jnp.intaa ).reshape(__SCREAMING_SNAKE_CASE ) return output def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ): lowercase = ids_tensor(__SCREAMING_SNAKE_CASE , vocab_size=2 , rng=__SCREAMING_SNAKE_CASE ) # make sure that at least one token is attended to for each batch lowercase = 1 return attn_mask @require_flax class A_ : '''simple docstring''' _UpperCamelCase : int = None _UpperCamelCase : Any = () def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 lowercase = 2 lowercase = inputs['input_ids'].shape[-1] // 2 lowercase = inputs['input_ids'][:max_batch_size, :sequence_length] lowercase = jnp.ones_like(snake_case ) lowercase = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens lowercase = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` lowercase = config.eos_token_id return config, input_ids, attention_mask, max_length 
@is_pt_flax_cross_test def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = False lowercase = max_length lowercase = 0 for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase = getattr(snake_case , snake_case ) lowercase = pt_model_class(snake_case ).eval() lowercase = load_flax_weights_in_pytorch_model(snake_case , flax_model.params ) lowercase = flax_model.generate(snake_case ).sequences lowercase = pt_model.generate(torch.tensor(snake_case , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: lowercase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = False lowercase = max_length for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = True lowercase = max_length for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): 
lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = False lowercase = max_length lowercase = 2 for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = False lowercase = max_length lowercase = 2 lowercase = 2 for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = True lowercase = max_length lowercase = 0.8 lowercase = 10 lowercase = 0.3 lowercase = 1 lowercase = 8 lowercase = 9 for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = max_length lowercase = 1 lowercase = 8 lowercase = 9 for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case 
).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() lowercase = max_length lowercase = 2 lowercase = 1 lowercase = 8 lowercase = 9 for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() # pad attention mask on the left lowercase = attention_mask.at[(0, 0)].set(0 ) lowercase = False lowercase = max_length for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case , attention_mask=snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case , attention_mask=snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() # pad attention mask on the left lowercase = attention_mask.at[(0, 0)].set(0 ) lowercase = True lowercase = max_length for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case , attention_mask=snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case , attention_mask=snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def 
SCREAMING_SNAKE_CASE__ ( self ): lowercase , lowercase , lowercase , lowercase = self._get_input_ids_and_config() # pad attention mask on the left lowercase = attention_mask.at[(0, 0)].set(0 ) lowercase = 2 lowercase = max_length for model_class in self.all_generative_model_classes: lowercase = model_class(snake_case ) lowercase = model.generate(snake_case , attention_mask=snake_case ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case ) lowercase = jit(model.generate ) lowercase = jit_generate(snake_case , attention_mask=snake_case ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class A_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self ): lowercase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' ) lowercase = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' ) lowercase = 'Hello world' lowercase = tokenizer(snake_case , return_tensors='np' ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(snake_case , 'do_samples' ): model.generate(snake_case , do_samples=snake_case ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(snake_case , 'foo' ): lowercase = {'foo': 'bar'} model.generate(snake_case , **snake_case )
84
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> np.ndarray: return input_array.reshape((input_array.size, 1) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : str = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : int = features[:, labels == i] __lowerCamelCase : Optional[int] = data.mean(1 ) # Centralize the data of class i __lowerCamelCase : int = data - column_reshape(lowerCamelCase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowerCamelCase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) __lowerCamelCase : Union[str, Any] = np.dot(lowerCamelCase__ , centered_data.T ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = features.mean(1 ) __lowerCamelCase : Union[str, Any] = np.nan for i in range(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = features[:, labels == i] __lowerCamelCase : Union[str, Any] = data.shape[1] __lowerCamelCase : Union[str, Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) __lowerCamelCase : List[str] = device_data * np.dot( column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ ) , (column_reshape(lowerCamelCase__ ) - column_reshape(lowerCamelCase__ )).T , ) return covariance_sum / features.shape[1] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: # Check if the features have been loaded if features.any(): __lowerCamelCase : Tuple = features.mean(1 ) # Center the dataset __lowerCamelCase : Any = features - np.reshape(lowerCamelCase__ , (data_mean.size, 1) ) __lowerCamelCase : Optional[int] = np.dot(lowerCamelCase__ , centered_data.T ) / features.shape[1] __lowerCamelCase , __lowerCamelCase : List[Any] = np.linalg.eigh(lowerCamelCase__ ) # Take all the columns in the reverse order (-1), and then takes only the first __lowerCamelCase : Dict = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space __lowerCamelCase : int = np.dot(filtered_eigenvectors.T , lowerCamelCase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: assert classes > dimensions # Check if features have been already loaded if features.any: __lowerCamelCase , __lowerCamelCase : Dict = eigh( covariance_between_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , covariance_within_classes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , ) __lowerCamelCase : Union[str, Any] = eigenvectors[:, ::-1][:, :dimensions] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = np.linalg.svd(lowerCamelCase__ ) __lowerCamelCase : int = svd_matrix[:, 0:dimensions] __lowerCamelCase : Optional[int] = np.dot(filtered_svd_matrix.T , lowerCamelCase__ ) logging.info('Linear 
Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowerCamelCase__ ) logging.error('Dataset empty' ) raise AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: # Create dummy dataset with 2 classes and 3 features __lowerCamelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) __lowerCamelCase : Optional[int] = np.array([0, 0, 0, 1, 1] ) __lowerCamelCase : Optional[Any] = 2 __lowerCamelCase : Tuple = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : int = linear_discriminant_analysis( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if isinstance(lowerCamelCase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def SCREAMING_SNAKE_CASE__ ( ) -> None: __lowerCamelCase : Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) __lowerCamelCase : Dict = 2 __lowerCamelCase : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowerCamelCase__ ) as error_info: __lowerCamelCase : Optional[Any] = principal_component_analysis(lowerCamelCase__ , lowerCamelCase__ ) if not np.allclose(lowerCamelCase__ , lowerCamelCase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
652
0
from math import pow def _a ( lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int , ): '''simple docstring''' if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count SCREAMING_SNAKE_CASE__ : Dict = int(pow(lowercase__ , lowercase__ ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = backtrack( lowercase__ , lowercase__ , current_number + 1 , lowercase__ , lowercase__ ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = backtrack( lowercase__ , lowercase__ , current_number + 1 , lowercase__ , lowercase__ ) return current_sum, solutions_count def _a ( lowercase__ : int , lowercase__ : int ): '''simple docstring''' if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10): raise ValueError( 'Invalid input\n' 'needed_sum must be between 1 and 1000, power between 2 and 10.' ) return backtrack(lowercase__ , lowercase__ , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
85
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    """Run the doctests embedded in transformers source files and documentation.

    The whole class is currently skipped (see the decorator above); the helper
    and test methods are kept runnable for when the skip is lifted.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests for every matching file directly under ``directory``.

        Args:
            directory: directory whose (non-recursive) files are scanned.
            identifier: if given, only files whose name contains this substring.
            n_identifier: substring(s); files containing any of them are excluded.
            ignore_files: extra file names to skip (``__init__.py`` is always skipped).
            only_modules: if True, resolve each file as a ``transformers`` attribute
                and run its doctest suite; otherwise run ``doctest.testfile`` on it.
        """
        files = [
            file
            for file in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, file))
        ]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            # Exclude every file whose name contains one of the given substrings.
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    # Not every file maps to an importable transformers attribute.
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(
                    str('..' / directory / file), optionflags=doctest.ELLIPSIS
                )
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        # Doc files are standalone text files, not importable modules.
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
652
0
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __a :Dict = get_logger(__name__) __a :int = Path(__file__).parent / 'model_card_template.md' __a :int = uuida().hex __a :Optional[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __a :List[Any] = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __a :List[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __snake_case ( __UpperCamelCase : Union[Dict, str, None] = None ): """simple docstring""" A_ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase ,__UpperCamelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase 
,__UpperCamelCase ): ua += "; " + user_agent return ua def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if token is None: A_ = HfFolder.get_token() if organization is None: A_ = whoami(__UpperCamelCase )["name"] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ): """simple docstring""" if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." ) if hasattr(__UpperCamelCase ,"local_rank" ) and args.local_rank not in [-1, 0]: return A_ = args.hub_token if hasattr(__UpperCamelCase ,"hub_token" ) else None A_ = get_full_repo_name(__UpperCamelCase ,token=__UpperCamelCase ) A_ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" ,license="apache-2.0" ,library_name="diffusers" ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=__UpperCamelCase ,model_name=__UpperCamelCase ,repo_name=__UpperCamelCase ,dataset_name=args.dataset_name if hasattr(__UpperCamelCase ,"dataset_name" ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase ,"gradient_accumulation_steps" ) else None ) ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta1" ) else None ,adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase ,"adam_beta2" ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase ,"adam_weight_decay" ) else None ,adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase ,"adam_epsilon" ) else None ,lr_scheduler=args.lr_scheduler if 
hasattr(__UpperCamelCase ,"lr_scheduler" ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase ,"lr_warmup_steps" ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase ,"ema_inv_gamma" ) else None ,ema_power=args.ema_power if hasattr(__UpperCamelCase ,"ema_power" ) else None ,ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase ,"ema_max_decay" ) else None ,mixed_precision=args.mixed_precision ,) A_ = os.path.join(args.output_dir ,"README.md" ) model_card.save(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash A_ = str(Path(__UpperCamelCase ).as_posix() ) A_ = re.search(R"snapshots/([^/]+)/" ,__UpperCamelCase ) if search is None: return None A_ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
__a :Optional[int] = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __a :Union[str, Any] = os.path.join(hf_cache_home, 'diffusers') def __snake_case ( __UpperCamelCase : Optional[str] = None ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: A_ = DIFFUSERS_CACHE if old_cache_dir is None: A_ = old_diffusers_cache A_ = Path(__UpperCamelCase ).expanduser() A_ = Path(__UpperCamelCase ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): A_ = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase ) new_blob_path.parent.mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase ) os.replace(__UpperCamelCase ,__UpperCamelCase ) try: os.symlink(__UpperCamelCase ,__UpperCamelCase ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). __a :str = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __a :Dict = 0 else: with open(cache_version_file) as f: try: __a :str = int(f.read()) except ValueError: __a :List[str] = 0 if cache_version < 1: __a :Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' 
) try: move_cache() except Exception as e: __a :List[Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure " 'the directory exists and can be written to.' ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ): """simple docstring""" if variant is not None: A_ = weights_name.split("." ) A_ = splits[:-1] + [variant] + splits[-1:] A_ = ".".join(__UpperCamelCase ) return weights_name def __snake_case ( __UpperCamelCase : Any ,*, __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Tuple=None ,): """simple docstring""" A_ = str(__UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): return pretrained_model_name_or_path elif os.path.isdir(__UpperCamelCase ): if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ): # Load from a PyTorch checkpoint A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ): A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory 
{pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" ) ): try: A_ = hf_hub_download( __UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,) try: # 2. 
Load model file as usual A_ = hf_hub_download( __UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' "this model name. Check the model page at " f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. 
If you were trying to load it from ''' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
86
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ="""▁""" a ={"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} a ={ """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } a ={"""vinai/bartpho-syllable""": 1024} class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES _UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : Dict="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,): # Mask token behave like a normal word, i.e. 
include the space before it __lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token __lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : int = vocab_file __lowerCamelCase : Tuple = monolingual_vocab_file __lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__)) # Load the reduced vocab # Keep order of special tokens for backward compatibility __lowerCamelCase : Optional[int] = {} __lowerCamelCase : List[Any] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids: __lowerCamelCase : Any = cnt cnt += 1 with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f: for line in f.readlines(): __lowerCamelCase : Any = line.strip().split()[0] __lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids) if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids: __lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids) __lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : int): __lowerCamelCase : Tuple = self.__dict__.copy() __lowerCamelCase : Optional[int] = None __lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : List[str] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs'): 
__lowerCamelCase : str = {} __lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCamelCase : Tuple = [self.cls_token_id] __lowerCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): __lowerCamelCase : Dict = [self.sep_token_id] __lowerCamelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def lowerCAmelCase ( self : List[str]): return len(self.fairseq_ids_to_tokens) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str): return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): if token in self.fairseq_tokens_to_ids: 
return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict): return self.fairseq_ids_to_tokens[index] def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Any = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip() return out_string def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if not os.path.isdir(SCREAMING_SNAKE_CASE__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) __lowerCamelCase : Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi: __lowerCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( SCREAMING_SNAKE_CASE__) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.monolingual_vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"{str(SCREAMING_SNAKE_CASE__)} \n") return out_vocab_file, out_monolingual_vocab_file
652
0
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm

# Tokens are maximal runs of word characters; everything else is a separator.
NON_ALPHA = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a token list, or None if it is too short to be meaningful."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split ``code`` on non-word characters and return the set of non-empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    """Incremental near-duplicate index backed by MinHash LSH.

    Each added document either joins the cluster of an already-indexed near
    duplicate or starts a new cluster keyed by the first close match found.
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # cluster base key -> set of near-duplicate keys
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert ``(code_key, min_hash)`` and record any near-duplicate cluster membership.

        ``code_key`` is expected to be a (index, repo_name, path) tuple.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            # Attach to an existing cluster if one of the close matches is a base;
            # otherwise start a new cluster rooted at the first close match.
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return all clusters, each as a list of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the duplicate clusters to ``filepath`` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    """Worker: map an (index, row) pair to ((index, repo_name, path), MinHash) or None."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    """Yield (key, MinHash) pairs for a dataset iterator, hashing in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    """Hash every row of the dataset and group near duplicates into clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(codea: str, codeb: str) -> float:
    """Token-set Jaccard similarity of two code strings."""
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    return len(tokensa & tokensb) / len(tokensa | tokensb)


# Shared (per-process, set by find_extremes) so pool workers can read the dataset
# without pickling it into every task.
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one cluster to its 'extremes': a minimal set of mutually dissimilar files.

    Each extreme carries a ``copies`` count of how many cluster members it represents.
    """
    extremes = []
    for elementa in cluster:
        code_a = _shared_dataset[elementa["base_index"]]["content"]
        for elementb in extremes:
            code_b = _shared_dataset[elementb["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            elementa["copies"] = 1
            extremes.append(elementa)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel; returns one list per cluster."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    """Remove near-duplicate rows from ``dataset``, keeping one 'extreme' per group.

    Returns:
        (filtered_dataset, duplicate_clusters) where each cluster element is
        annotated with ``is_extreme`` and, for extremes, a ``copies`` count.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"""Original dataset size: {len(dataset)}""")
    print(f"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(f"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(f"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(f"""Filtered dataset size: {len(ds_filter)}""")

    return ds_filter, duplicate_clusters


# Backward-compat alias: before this fix every top-level def in the mangled file
# was bound to this single name, which ended up pointing at the last definition.
SCREAMING_SNAKE_CASE = deduplicate_dataset
87
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A_ : def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=1_3 ,SCREAMING_SNAKE_CASE__ : int=3_0 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : List[str]=3_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,): __lowerCamelCase : Optional[int] = parent __lowerCamelCase : Optional[Any] = batch_size __lowerCamelCase : Dict = image_size __lowerCamelCase : Optional[Any] = patch_size __lowerCamelCase : Optional[Any] = num_channels __lowerCamelCase : str = is_training __lowerCamelCase : List[Any] = use_labels __lowerCamelCase : Any = hidden_size __lowerCamelCase : Optional[int] = 
num_hidden_layers __lowerCamelCase : Any = num_attention_heads __lowerCamelCase : Tuple = intermediate_size __lowerCamelCase : Dict = hidden_act __lowerCamelCase : Optional[Any] = hidden_dropout_prob __lowerCamelCase : List[Any] = attention_probs_dropout_prob __lowerCamelCase : Dict = type_sequence_label_size __lowerCamelCase : Optional[Any] = initializer_range __lowerCamelCase : List[str] = scope __lowerCamelCase : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) __lowerCamelCase : str = (image_size // patch_size) ** 2 __lowerCamelCase : str = num_patches + 2 def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __lowerCamelCase : List[Any] = None if self.use_labels: __lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size) __lowerCamelCase : List[str] = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self : List[Any]): return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict): __lowerCamelCase : Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size)) def 
lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any): __lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images __lowerCamelCase : int = 1 __lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size)) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]): __lowerCamelCase : Dict = self.type_sequence_label_size __lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) # test greyscale images __lowerCamelCase : List[Any] = 1 __lowerCamelCase : Tuple = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size)) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf 
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _UpperCAmelCase : List[Any] = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[int] = False _UpperCAmelCase : Optional[int] = False def lowerCAmelCase ( self : Any): __lowerCamelCase : str = TFDeiTModelTester(self) __lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7) def lowerCAmelCase ( self : str): self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Dict): __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer)) __lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense)) def lowerCAmelCase ( self : List[Any]): __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Any = [*signature.parameters.keys()] __lowerCamelCase : Union[str, Any] = 
['pixel_values'] self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str=False): __lowerCamelCase : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase ( self : Optional[int]): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__) self.assertIsNotNone(SCREAMING_SNAKE_CASE__) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self : List[Any]): return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def lowerCAmelCase ( self : List[Any]): __lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') __lowerCamelCase : int = self.default_image_processor __lowerCamelCase : Tuple = prepare_img() 
__lowerCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf') # forward pass __lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__) # verify the logits __lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861]) self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
652
0
"""simple docstring""" import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") UpperCAmelCase = parser.parse_args() if args.model_type == "roberta": UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name) UpperCAmelCase = """roberta""" elif args.model_type == "gpt2": UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name) UpperCAmelCase = """transformer""" UpperCAmelCase = model.state_dict() UpperCAmelCase = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: UpperCAmelCase = state_dict[f'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: UpperCAmelCase = f'''{prefix}.embeddings.{w}.weight''' UpperCAmelCase = state_dict[param_name] for w in ["weight", "bias"]: UpperCAmelCase = f'''{prefix}.embeddings.LayerNorm.{w}''' UpperCAmelCase = state_dict[param_name] # Transformer Blocks # UpperCAmelCase = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: UpperCAmelCase = state_dict[ f'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] UpperCAmelCase = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", 
"attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: UpperCAmelCase = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: UpperCAmelCase = state_dict[f'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: UpperCAmelCase = state_dict[f'''lm_head.dense.{w}'''] UpperCAmelCase = state_dict[f'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: UpperCAmelCase = state_dict[f'''{prefix}.ln_f.{w}'''] UpperCAmelCase = state_dict["""lm_head.weight"""] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
88
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] _UpperCAmelCase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
652
0
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class _lowerCamelCase( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ): def __init__( self, lowerCamelCase=None, **lowerCamelCase) -> Tuple: """simple docstring""" super().__init__(features=lowerCamelCase) _lowercase : List[str] = torch_tensor_kwargs import torch # noqa import torch at initialization def UpperCamelCase ( self, lowerCamelCase) -> str: """simple docstring""" import torch if isinstance(lowerCamelCase, lowerCamelCase) and column: if all( isinstance(lowerCamelCase, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return torch.stack(lowerCamelCase) return column def UpperCamelCase ( self, lowerCamelCase) -> Any: """simple docstring""" import torch if isinstance(lowerCamelCase, (str, bytes, type(lowerCamelCase))): return value elif isinstance(lowerCamelCase, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): return value.tolist() _lowercase : Optional[int] = {} if isinstance(lowerCamelCase, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): _lowercase : List[str] = {'dtype': torch.intaa} elif isinstance(lowerCamelCase, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): _lowercase : Any = {'dtype': torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase, PIL.Image.Image): _lowercase : List[Any] = np.asarray(lowerCamelCase) return torch.tensor(lowerCamelCase, **{**default_dtype, **self.torch_tensor_kwargs}) def UpperCamelCase ( self, lowerCamelCase) -> Any: """simple docstring""" import torch # support for torch, tf, jax etc. 
if hasattr(lowerCamelCase, '__array__') and not isinstance(lowerCamelCase, torch.Tensor): _lowercase : Any = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase, np.ndarray): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCamelCase) for substruct in data_struct]) elif isinstance(lowerCamelCase, (list, tuple)): return self._consolidate([self.recursive_tensorize(lowerCamelCase) for substruct in data_struct]) return self._tensorize(lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]: """simple docstring""" return map_nested(self._recursive_tensorize, lowerCamelCase, map_list=lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase) -> Mapping: """simple docstring""" _lowercase : Optional[Any] = self.numpy_arrow_extractor().extract_row(lowerCamelCase) _lowercase : Optional[Any] = self.python_features_decoder.decode_row(lowerCamelCase) return self.recursive_tensorize(lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase) -> "torch.Tensor": """simple docstring""" _lowercase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(lowerCamelCase) _lowercase : int = self.python_features_decoder.decode_column(lowerCamelCase, pa_table.column_names[0]) _lowercase : Union[str, Any] = self.recursive_tensorize(lowerCamelCase) _lowercase : Optional[Any] = self._consolidate(lowerCamelCase) return column def UpperCamelCase ( self, lowerCamelCase) -> Mapping: """simple docstring""" _lowercase : str = self.numpy_arrow_extractor().extract_batch(lowerCamelCase) _lowercase : List[Any] = self.python_features_decoder.decode_batch(lowerCamelCase) _lowercase : Any = self.recursive_tensorize(lowerCamelCase) for column_name in batch: _lowercase : Tuple = self._consolidate(batch[column_name]) return batch
89
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
652
0
'''simple docstring''' import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __UpperCAmelCase = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _snake_case ( A , A=None ) -> Optional[Any]: require_version(deps[pkg] , A )
90
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ={"""vocab_file""": """vocab.txt"""} a ={ """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } a ={ """openbmb/cpm-ant-10b""": 1024, } def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: __lowerCamelCase : int = collections.OrderedDict() with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader: __lowerCamelCase : Optional[int] = reader.readlines() for index, token in enumerate(lowerCamelCase__ ): __lowerCamelCase : Optional[Any] = token.rstrip('\n' ) __lowerCamelCase : Union[str, Any] = index return vocab class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="<unk>" ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_0): __lowerCamelCase : str = vocab __lowerCamelCase : Dict = unk_token __lowerCamelCase : int = max_input_chars_per_word def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]): __lowerCamelCase : int = list(SCREAMING_SNAKE_CASE__) if len(SCREAMING_SNAKE_CASE__) > self.max_input_chars_per_word: return [self.unk_token] __lowerCamelCase : Tuple = 0 __lowerCamelCase : str = [] while start < len(SCREAMING_SNAKE_CASE__): __lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = None while start < end: __lowerCamelCase : Any = ''.join(chars[start:end]) if substr in self.vocab: __lowerCamelCase : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = end return sub_tokens class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = 
VOCAB_FILES_NAMES _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = False def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,): requires_backends(self ,['jieba']) super().__init__( bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Optional[Any] = bod_token __lowerCamelCase : Dict = eod_token __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = self.encoder[space_token] __lowerCamelCase : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1])) __lowerCamelCase : int = {v: k for k, v in self.encoder.items()} __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token) @property def lowerCAmelCase ( self : List[Any]): return self.encoder[self.bod_token] @property def lowerCAmelCase ( self : Tuple): return self.encoder[self.eod_token] @property def lowerCAmelCase ( self : Union[str, Any]): return self.encoder["\n"] @property def lowerCAmelCase ( self : 
str): return len(self.encoder) def lowerCAmelCase ( self : str): return dict(self.encoder ,**self.added_tokens_encoder) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]): __lowerCamelCase : Any = [] for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)) return output_tokens def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0] __lowerCamelCase : str = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]): return token in self.encoder def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]): return "".join(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token)) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]): return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token) def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if os.path.isdir(SCREAMING_SNAKE_CASE__): __lowerCamelCase : Any = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) else: __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory __lowerCamelCase : Any = 0 if " " in self.encoder: __lowerCamelCase : Any = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: __lowerCamelCase : str = self.encoder['\n'] del self.encoder["\n"] __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() 
,key=lambda SCREAMING_SNAKE_CASE__: x[1])) with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." ' Please check that the vocabulary is not corrupted!') __lowerCamelCase : Any = token_index writer.write(token + '\n') index += 1 return (vocab_file,) def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
652
0
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def _snake_case ( snake_case__ : str , snake_case__ : float | Decimal , snake_case__ : float = 10**-10 ): A = a while True: A = Decimal(snake_case__ ) - ( Decimal(eval(snake_case__ ) ) / Decimal(eval(str(diff(snake_case__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(snake_case__ ) ) < precision: # noqa: S307 return float(snake_case__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""") # Find Square Root of 5 print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
91
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a ={"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a =["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
652
0
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : int ) -> str: if number > 0: raise ValueError('''input must be a negative integer''' ) lowercase : Dict =len(bin(__magic_name__ )[3:] ) lowercase : List[Any] =bin(abs(__magic_name__ ) - (1 << binary_number_length) )[3:] lowercase : List[str] =( ( '''1''' + '''0''' * (binary_number_length - len(__magic_name__ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
92
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : int = (UnCLIPScheduler,) def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Any = { 'num_train_timesteps': 1_0_0_0, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**SCREAMING_SNAKE_CASE__) return config def lowerCAmelCase ( self : Optional[Any]): for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[Any]): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Union[str, Any]): for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple): for clip_sample_range in [1, 5, 1_0, 2_0]: self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : List[Any]): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Dict): for time_step in [0, 5_0_0, 9_9_9]: for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Optional[int]): __lowerCamelCase : Optional[int] = self.scheduler_classes[0] __lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log') __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__) assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5 def lowerCAmelCase ( self : Any): __lowerCamelCase : Dict = self.scheduler_classes[0] __lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range') __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = 0.5 assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5 assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5 assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5 def lowerCAmelCase ( self : List[str]): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : str = self.get_scheduler_config() __lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = scheduler.timesteps __lowerCamelCase : Union[str, Any] = self.dummy_model() __lowerCamelCase : Optional[Any] = self.dummy_sample_deter __lowerCamelCase : List[str] = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) # 2. 
predict previous mean of sample x_t-1 __lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Optional[Any] = pred_prev_sample __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 252.2682495) < 1E-2 assert abs(result_mean.item() - 0.3284743) < 1E-3 def lowerCAmelCase ( self : str): __lowerCamelCase : str = self.scheduler_classes[0] __lowerCamelCase : List[Any] = self.get_scheduler_config() __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__) scheduler.set_timesteps(2_5) __lowerCamelCase : int = scheduler.timesteps __lowerCamelCase : Tuple = self.dummy_model() __lowerCamelCase : Any = self.dummy_sample_deter __lowerCamelCase : Any = torch.manual_seed(0) for i, t in enumerate(SCREAMING_SNAKE_CASE__): # 1. predict noise residual __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if i + 1 == timesteps.shape[0]: __lowerCamelCase : Optional[Any] = None else: __lowerCamelCase : Union[str, Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 __lowerCamelCase : int = scheduler.step( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample __lowerCamelCase : Union[str, Any] = pred_prev_sample __lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__)) assert abs(result_sum.item() - 258.2044983) < 1E-2 assert abs(result_mean.item() - 0.3362038) < 1E-3 def lowerCAmelCase ( self : List[Any]): pass def lowerCAmelCase ( self : Union[str, Any]): pass
652
0
"""simple docstring""" from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" lowerCAmelCase__ :Dict = k_size // 2 lowerCAmelCase__ , lowerCAmelCase__ :Tuple = mgrid[0 - center : k_size - center, 0 - center : k_size - center] lowerCAmelCase__ :List[Any] = 1 / (2 * pi * sigma) * exp(-(square(_SCREAMING_SNAKE_CASE ) + square(_SCREAMING_SNAKE_CASE )) / (2 * square(_SCREAMING_SNAKE_CASE )) ) return g def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = image.shape[0], image.shape[1] # dst image height and width lowerCAmelCase__ :Union[str, Any] = height - k_size + 1 lowerCAmelCase__ :Optional[Any] = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows lowerCAmelCase__ :Optional[int] = zeros((dst_height * dst_width, k_size * k_size) ) lowerCAmelCase__ :Tuple = 0 for i, j in product(range(_SCREAMING_SNAKE_CASE ) , range(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :Union[str, Any] = ravel(image[i : i + k_size, j : j + k_size] ) lowerCAmelCase__ :Optional[Any] = window row += 1 # turn the kernel into shape(k*k, 1) lowerCAmelCase__ :str = gen_gaussian_kernel(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase__ :str = ravel(_SCREAMING_SNAKE_CASE ) # reshape and get the dst image lowerCAmelCase__ :Optional[Any] = dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE ) return dst if __name__ == "__main__": # read original image __A = imread(R"""../image_data/lena.jpg""") # turn image in gray scale value __A = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size __A = gaussian_filter(gray, 3, sigma=1) __A = 
gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("""gaussian filter with 3x3 mask""", gaussianaxa) imshow("""gaussian filter with 5x5 mask""", gaussianaxa) waitKey()
93
from ...configuration_utils import PretrainedConfig from ...utils import logging a =logging.get_logger(__name__) a ={ """caidas/swin2sr-classicalsr-x2-64""": ( """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json""" ), } class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Optional[int] = '''swin2sr''' _UpperCAmelCase : Any = { '''hidden_size''': '''embed_dim''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=1_8_0 ,SCREAMING_SNAKE_CASE__ : Any=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : int=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : int="1conv" ,SCREAMING_SNAKE_CASE__ : Optional[int]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,): super().__init__(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : Dict = image_size __lowerCamelCase : str = patch_size __lowerCamelCase : List[Any] = num_channels __lowerCamelCase : Dict = embed_dim __lowerCamelCase : Dict = depths __lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = num_heads __lowerCamelCase : Tuple = window_size __lowerCamelCase : Dict = mlp_ratio __lowerCamelCase : str = qkv_bias __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Optional[Any] = attention_probs_dropout_prob __lowerCamelCase : List[Any] = drop_path_rate 
__lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : Dict = use_absolute_embeddings __lowerCamelCase : Optional[Any] = layer_norm_eps __lowerCamelCase : str = initializer_range __lowerCamelCase : List[Any] = upscale __lowerCamelCase : List[Any] = img_range __lowerCamelCase : List[str] = resi_connection __lowerCamelCase : Union[str, Any] = upsampler
652
0
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) SCREAMING_SNAKE_CASE = logging.getLogger(__name__) SCREAMING_SNAKE_CASE = 'Hello world! cécé herlolip' SCREAMING_SNAKE_CASE = namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def lowercase_ ( __A : int , __A : Any ) -> int: """simple docstring""" lowercase : List[str] =BertAbsConfig( temp_dir='''.''' , finetune_bert=__A , large=__A , share_emb=__A , use_bert_emb=__A , encoder='''bert''' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , ) lowercase : Any =torch.load(__A , lambda __A , __A : storage ) lowercase : Union[str, Any] =AbsSummarizer(__A , torch.device('''cpu''' ) , __A ) original.eval() lowercase : Tuple =BertAbsSummarizer(__A , torch.device('''cpu''' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('''convert the model''' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('''Make sure that the models\' outputs are identical''' ) lowercase : Tuple =BertTokenizer.from_pretrained('''bert-base-uncased''' ) # prepare the model inputs lowercase : Tuple 
=tokenizer.encode('''This is sample éàalj\'-.''' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(__A )) ) lowercase : Dict =torch.tensor(__A ).unsqueeze(0 ) lowercase : Optional[int] =tokenizer.encode('''This is sample 3 éàalj\'-.''' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(__A )) ) lowercase : Any =torch.tensor(__A ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass lowercase : Any =encoder_input_ids lowercase : int =decoder_input_ids lowercase : Any =None lowercase : Any =None lowercase : int =None lowercase : List[str] =None lowercase : str =None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical lowercase : List[Any] =original(__A , __A , __A , __A , __A , __A , __A )[0] lowercase : str =original.generator(__A ) lowercase : Optional[int] =new_model( __A , __A , __A , __A , __A )[0] lowercase : Union[str, Any] =new_model.generator(__A ) lowercase : List[str] =torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(__A ) ) lowercase : str =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('''Maximum absolute difference beween weights: {:.2f}'''.format(__A ) ) lowercase : Optional[Any] =torch.allclose(__A , __A , atol=1E-3 ) if are_identical: logging.info('''all weights are equal up to 1e-3''' ) else: raise ValueError('''the weights are different. 
The new model is likely different from the original one.''' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('''saving the model\'s state dictionary''' ) torch.save( new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) SCREAMING_SNAKE_CASE = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
94
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__="pt" ) -> Dict: __lowerCamelCase : Any = {'add_prefix_space': True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(' ' ) else {} __lowerCamelCase : int = padding_side return tokenizer( [line] , max_length=lowerCamelCase__ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ) -> List[str]: __lowerCamelCase : List[str] = input_ids.ne(lowerCamelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,): super().__init__() __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source') __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target') __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file) __lowerCamelCase : List[Any] = 
max_source_length __lowerCamelCase : List[str] = max_target_length assert min(self.src_lens) > 0, F"found empty line in {self.src_file}" __lowerCamelCase : Any = tokenizer __lowerCamelCase : Optional[int] = prefix if n_obs is not None: __lowerCamelCase : Dict = self.src_lens[:n_obs] __lowerCamelCase : str = src_lang __lowerCamelCase : Any = tgt_lang def __len__( self : Tuple): return len(self.src_lens) def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Dict = index + 1 # linecache starts at 1 __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n') __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n') assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __lowerCamelCase : Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer ) __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right') __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_target_length ,'right') __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze() __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze() __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int): return [len(SCREAMING_SNAKE_CASE__) for x in 
Path(SCREAMING_SNAKE_CASE__).open().readlines()] def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch]) __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch]) __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch]) __lowerCamelCase : Optional[int] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id ) __lowerCamelCase : int = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id ) __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = { 'input_ids': source_ids, 'attention_mask': source_mask, 'decoder_input_ids': y, } return batch a =getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return list(itertools.chain.from_iterable(lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None: __lowerCamelCase : str = get_git_info() save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , 'git_log.json' ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ) -> List[str]: with open(lowerCamelCase__ , 'w' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: with open(lowerCamelCase__ ) as f: return json.load(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: __lowerCamelCase : str = git.Repo(search_parent_directories=lowerCamelCase__ ) __lowerCamelCase : Any = { 'repo_id': str(lowerCamelCase__ ), 'repo_sha': str(repo.head.object.hexsha ), 
'repo_branch': str(repo.active_branch ), 'hostname': str(socket.gethostname() ), } return repo_infos def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List: return list(map(lowerCamelCase__ , lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: with open(lowerCamelCase__ , 'wb' ) as f: return pickle.dump(lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: def remove_articles(lowerCamelCase__ ): return re.sub(R'\b(a|an|the)\b' , ' ' , lowerCamelCase__ ) def white_space_fix(lowerCamelCase__ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase__ ): __lowerCamelCase : Dict = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int: __lowerCamelCase : str = normalize_answer(lowerCamelCase__ ).split() __lowerCamelCase : Optional[int] = normalize_answer(lowerCamelCase__ ).split() __lowerCamelCase : Union[str, Any] = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ ) __lowerCamelCase : Any = sum(common.values() ) if num_same == 0: return 0 __lowerCamelCase : List[Any] = 1.0 * num_same / len(lowerCamelCase__ ) __lowerCamelCase : int = 1.0 * num_same / len(lowerCamelCase__ ) __lowerCamelCase : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) __lowerCamelCase : Dict = 0 for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ): em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ ) if 
len(lowerCamelCase__ ) > 0: em /= len(lowerCamelCase__ ) return {"em": em} def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: return model_prefix.startswith('rag' ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: __lowerCamelCase : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __lowerCamelCase : List[str] = 'dropout_rate' for p in extra_params: if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) continue __lowerCamelCase : List[Any] = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p] setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) return hparams, config
652
0
"""simple docstring""" def snake_case ( A__ ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection UpperCAmelCase_ : str = len(A__ ) UpperCAmelCase_ : Union[str, Any] = max(A__ ) UpperCAmelCase_ : Union[str, Any] = min(A__ ) # create the counting array UpperCAmelCase_ : Dict = coll_max + 1 - coll_min UpperCAmelCase_ : Dict = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 ,A__ ): UpperCAmelCase_ : Dict = counting_arr[i] + counting_arr[i - 1] # create the output collection UpperCAmelCase_ : Optional[Any] = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 ,A__ ) ): UpperCAmelCase_ : List[str] = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def snake_case ( A__ ): return "".join([chr(A__ ) for i in counting_sort([ord(A__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt" lowerCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase_ = [int(item) for item in user_input.split(''',''')] print(counting_sort(unsorted))
95
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig a =logging.get_logger(__name__) # General docstring a ="""MobileNetV1Config""" # Base docstring a ="""google/mobilenet_v1_1.0_224""" a =[1, 1024, 7, 7] # Image classification docstring a ="""google/mobilenet_v1_1.0_224""" a ="""tabby, tabby cat""" a =[ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str: __lowerCamelCase : str = {} if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : int = model.mobilenet_va else: __lowerCamelCase : List[str] = model __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/' __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var for i in range(1_3 ): __lowerCamelCase : Any = i + 1 __lowerCamelCase : Union[str, Any] = i * 2 __lowerCamelCase : Optional[Any] = backbone.layer[pt_index] __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" __lowerCamelCase : Tuple = pointer.convolution.weight __lowerCamelCase : Optional[Any] = pointer.normalization.bias __lowerCamelCase : Union[str, Any] = 
pointer.normalization.weight __lowerCamelCase : List[str] = pointer.normalization.running_mean __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var __lowerCamelCase : int = backbone.layer[pt_index + 1] __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" __lowerCamelCase : Optional[Any] = pointer.convolution.weight __lowerCamelCase : Any = pointer.normalization.bias __lowerCamelCase : str = pointer.normalization.weight __lowerCamelCase : Dict = pointer.normalization.running_mean __lowerCamelCase : List[str] = pointer.normalization.running_var if isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __lowerCamelCase : Any = model.classifier.weight __lowerCamelCase : int = model.classifier.bias return tf_to_pt_map def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ ) __lowerCamelCase : List[str] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : List[Any] = array # Build TF to PyTorch weights loading map __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue __lowerCamelCase : Optional[int] = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __lowerCamelCase : Any = array.squeeze().transpose() else: __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ ) tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ ) tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ ) tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ ) tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> torch.Tensor: __lowerCamelCase , __lowerCamelCase : int = features.shape[-2:] __lowerCamelCase , __lowerCamelCase : List[str] = conv_layer.stride __lowerCamelCase , __lowerCamelCase : str = 
conv_layer.kernel_size if in_height % stride_height == 0: __lowerCamelCase : Optional[int] = max(kernel_height - stride_height , 0 ) else: __lowerCamelCase : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowerCamelCase : List[str] = max(kernel_width - stride_width , 0 ) else: __lowerCamelCase : List[str] = max(kernel_width - (in_width % stride_width) , 0 ) __lowerCamelCase : List[str] = pad_along_width // 2 __lowerCamelCase : Optional[int] = pad_along_width - pad_left __lowerCamelCase : Any = pad_along_height // 2 __lowerCamelCase : List[Any] = pad_along_height - pad_top __lowerCamelCase : Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowerCamelCase__ , lowerCamelCase__ , 'constant' , 0.0 ) class A_ ( nn.Module ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[bool] = True ,SCREAMING_SNAKE_CASE__ : Optional[bool or str] = True ,): super().__init__() __lowerCamelCase : Dict = config if in_channels % groups != 0: raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.") if out_channels % groups != 0: raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.") __lowerCamelCase : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2) __lowerCamelCase : Optional[int] = nn.Convad( in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ ,padding_mode='zeros' ,) if use_normalization: __lowerCamelCase : Optional[int] = nn.BatchNormad( 
num_features=SCREAMING_SNAKE_CASE__ ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=SCREAMING_SNAKE_CASE__ ,track_running_stats=SCREAMING_SNAKE_CASE__ ,) else: __lowerCamelCase : Dict = None if use_activation: if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : Dict = ACTaFN[use_activation] elif isinstance(config.hidden_act ,SCREAMING_SNAKE_CASE__): __lowerCamelCase : str = ACTaFN[config.hidden_act] else: __lowerCamelCase : List[str] = config.hidden_act else: __lowerCamelCase : List[str] = None def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.Tensor): if self.config.tf_padding: __lowerCamelCase : Any = apply_tf_padding(SCREAMING_SNAKE_CASE__ ,self.convolution) __lowerCamelCase : Optional[int] = self.convolution(SCREAMING_SNAKE_CASE__) if self.normalization is not None: __lowerCamelCase : Dict = self.normalization(SCREAMING_SNAKE_CASE__) if self.activation is not None: __lowerCamelCase : List[str] = self.activation(SCREAMING_SNAKE_CASE__) return features class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : Union[str, Any] = MobileNetVaConfig _UpperCAmelCase : List[str] = load_tf_weights_in_mobilenet_va _UpperCAmelCase : List[str] = '''mobilenet_v1''' _UpperCAmelCase : Any = '''pixel_values''' _UpperCAmelCase : int = False def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[nn.Linear, nn.Convad]): if isinstance(SCREAMING_SNAKE_CASE__ ,(nn.Linear, nn.Convad)): module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(SCREAMING_SNAKE_CASE__ ,nn.BatchNormad): module.bias.data.zero_() module.weight.data.fill_(1.0) a =r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. 
Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ a =r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True): super().__init__(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = config __lowerCamelCase : Optional[int] = 3_2 __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth) __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,) __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowerCamelCase : str = nn.ModuleList() for i in range(1_3): __lowerCamelCase : str = out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth) self.layer.append( MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,)) 
self.layer.append( MobileNetVaConvLayer( SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,)) __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict): raise NotImplementedError @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,): __lowerCamelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values') __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer): __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__) if output_hidden_states: __lowerCamelCase : Any = all_hidden_states + (hidden_states,) __lowerCamelCase : Optional[Any] = hidden_states if self.pooler is not None: __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1) else: __lowerCamelCase : List[str] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ 
,hidden_states=SCREAMING_SNAKE_CASE__ ,) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , SCREAMING_SNAKE_CASE , ) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig): super().__init__(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = config.num_labels __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,): __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1] __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : List[str] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCamelCase : Dict = 'regression' elif self.num_labels > 1 and 
(labels.dtype == torch.long or labels.dtype == torch.int): __lowerCamelCase : int = 'single_label_classification' else: __lowerCamelCase : Tuple = 'multi_label_classification' if self.config.problem_type == "regression": __lowerCamelCase : Tuple = MSELoss() if self.num_labels == 1: __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze()) else: __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) elif self.config.problem_type == "single_label_classification": __lowerCamelCase : List[str] = CrossEntropyLoss() __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1)) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase : int = BCEWithLogitsLoss() __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) if not return_dict: __lowerCamelCase : List[str] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
652
0
"""simple docstring""" def a ( __UpperCAmelCase : str ) -> bool: if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) __magic_name__: Optional[Any] = sorted(string.lower() ) return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) ) if __name__ == "__main__": __lowerCamelCase = input('Enter a string ').strip() __lowerCamelCase = is_isogram(input_str) print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
96
from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray: __lowerCamelCase : Optional[Any] = cva.getAffineTransform(lowerCamelCase__ , lowerCamelCase__ ) return cva.warpAffine(lowerCamelCase__ , lowerCamelCase__ , (rows, cols) ) if __name__ == "__main__": # read original image a =cva.imread( str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""") ) # turn image in gray scale value a =cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape a , a =gray_img.shape # set different points to rotate image a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa) a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa) a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa) a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list a =[ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations a =plt.figure(1) a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""") plt.title(titles[i]) plt.axis("""off""") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
652
0
def a ( snake_case__: str , snake_case__: str = " " ): '''simple docstring''' lowercase_ = [] lowercase_ = 0 for index, char in enumerate(snake_case__ ): if char == separator: split_words.append(string[last_index:index] ) lowercase_ = index + 1 elif index + 1 == len(snake_case__ ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
97
import math def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int: if not isinstance(lowerCamelCase__ , lowerCamelCase__ ): __lowerCamelCase : List[str] = F"Input value of [number={number}] must be an integer" raise TypeError(lowerCamelCase__ ) if number < 1: __lowerCamelCase : int = F"Input value of [number={number}] must be > 0" raise ValueError(lowerCamelCase__ ) elif number == 1: return 3 elif number == 2: return 5 else: __lowerCamelCase : Any = int(math.log(number // 3 , 2 ) ) + 2 __lowerCamelCase : List[Any] = [3, 5] __lowerCamelCase : Union[str, Any] = 2 __lowerCamelCase : List[str] = 3 for block in range(1 , lowerCamelCase__ ): for _ in range(lowerCamelCase__ ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): a =0 try: a =proth(number) except ValueError: print(F"""ValueError: there is no {number}th Proth number""") continue print(F"""The {number}th Proth number: {value}""")
652
0
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor lowercase__ : int = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : int , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Any ) -> None: '''simple docstring''' warnings.warn( '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use CLIPImageProcessor instead.''' , lowerCAmelCase__ , ) super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
98
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A_ ( unittest.TestCase ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __lowerCamelCase : str = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = min_resolution __lowerCamelCase : Tuple = max_resolution __lowerCamelCase : Dict = do_resize __lowerCamelCase : List[Any] = size __lowerCamelCase : Tuple = do_normalize __lowerCamelCase : Any = image_mean __lowerCamelCase : List[str] = image_std __lowerCamelCase : List[Any] = do_rescale __lowerCamelCase : str = rescale_factor __lowerCamelCase : Tuple = do_pad def lowerCAmelCase ( self : Dict): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, 
"do_pad": self.do_pad, } def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False): if not batched: __lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image): __lowerCamelCase , __lowerCamelCase : Any = image.size else: __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w) __lowerCamelCase : Tuple = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Union[str, Any] = self.size['shortest_edge'] __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h) else: __lowerCamelCase : List[Any] = self.size['shortest_edge'] __lowerCamelCase : Optional[int] = self.size['shortest_edge'] else: __lowerCamelCase : List[str] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0] __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = DetaImageProcessingTester(self) @property def lowerCAmelCase ( self : Any): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Dict): __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ 
,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester 
,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : int): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ 
,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def lowerCAmelCase ( self : Optional[Any]): # prepare image and target __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f: __lowerCamelCase : List[str] = json.loads(f.read()) __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __lowerCamelCase : Optional[int] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] 
,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify orig_size __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__)) @slow def lowerCAmelCase ( self : str): # prepare image, target and masks_path __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f: __lowerCamelCase : Tuple = json.loads(f.read()) __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic') __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : Tuple = torch.Size([6, 4]) 
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : int = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify masks __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__) # verify orig_size __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
652
0
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def a (lowerCAmelCase__ ): __a = args.pruning_method __a = args.threshold __a = args.model_name_or_path.rstrip("""/""" ) __a = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) __a = torch.load(os.path.join(lowerCAmelCase__ , """pytorch_model.bin""" ) ) __a = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: __a = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: __a = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: __a = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": __a = MagnitudeBinarizer.apply(inputs=lowerCAmelCase__ , threshold=lowerCAmelCase__ ) __a = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue __a = name[:-6] __a = model[f'''{prefix_}mask_scores'''] __a = TopKBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ ) __a = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue __a = name[:-6] __a = model[f'''{prefix_}mask_scores'''] __a = ThresholdBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __a = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue __a = name[:-6] __a = model[f'''{prefix_}mask_scores'''] __a , __a = -0.1, 1.1 __a = torch.sigmoid(lowerCAmelCase__ ) __a = s * (r - l) + l __a = s_bar.clamp(min=0.0 , max=1.0 ) __a = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError("""Unknown pruning method""" ) if target_model_path is None: __a = os.path.join( os.path.dirname(lowerCAmelCase__ ) , f'''bertarized_{os.path.basename(lowerCAmelCase__ )}''' ) if not os.path.isdir(lowerCAmelCase__ ): 
shutil.copytree(lowerCAmelCase__ , lowerCAmelCase__ ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , """pytorch_model.bin""" ) ) print("""\nPruned model saved! See you later!""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( '--pruning_method', choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'], type=str, required=True, help=( 'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,' ' sigmoied_threshold = Soft movement pruning)' ), ) parser.add_argument( '--threshold', type=float, required=False, help=( 'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.' 'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.' 'Not needed for `l0`' ), ) parser.add_argument( '--model_name_or_path', type=str, required=True, help='Folder containing the model that was previously fine-pruned', ) parser.add_argument( '--target_model_path', default=None, type=str, required=False, help='Folder containing the model that was previously fine-pruned', ) SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
99
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] _UpperCAmelCase : List[Any] = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _UpperCAmelCase : Tuple = False @property def lowerCAmelCase ( self : Tuple): return 3_2 @property def lowerCAmelCase ( self : List[Any]): return 3_2 @property def lowerCAmelCase ( self : str): return self.time_input_dim @property def lowerCAmelCase ( self : List[str]): return self.time_input_dim * 4 @property def lowerCAmelCase ( self : List[str]): return 1_0_0 @property def lowerCAmelCase ( self : Dict): torch.manual_seed(0) __lowerCamelCase : Optional[Any] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': 
self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__) return model @property def lowerCAmelCase ( self : Union[str, Any]): return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase ( self : Optional[Any]): torch.manual_seed(0) __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs) return model def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Tuple = self.dummy_unet __lowerCamelCase : List[Any] = self.dummy_movq __lowerCamelCase : str = DDIMScheduler( num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Dict = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0): __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to( SCREAMING_SNAKE_CASE__) # create hint __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) 
,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__) if str(SCREAMING_SNAKE_CASE__).startswith('mps'): __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__) else: __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 6_4, 'width': 6_4, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Dict = 'cpu' __lowerCamelCase : Tuple = self.get_dummy_components() __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)) __lowerCamelCase : int = output.images __lowerCamelCase : Tuple = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0] __lowerCamelCase : Dict = image[0, -3:, -3:, -1] __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowerCamelCase : List[str] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class A_ ( unittest.TestCase ): def lowerCAmelCase ( self : int): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : int): __lowerCamelCase : List[Any] = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy') __lowerCamelCase : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png') __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0 __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0) __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa) pipe_prior.to(SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa) __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = 'A robot, 4k photo' __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior( SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0) __lowerCamelCase : Any = pipeline( image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,) __lowerCamelCase : List[Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
652
0